diff --git a/.github/mergify.yml b/.github/mergify.yml index 61ef090a768..51e43ba61de 100644 --- a/.github/mergify.yml +++ b/.github/mergify.yml @@ -9,8 +9,8 @@ pull_request_rules: - "check-success=tox (3.9.18)" - check-success=WIP actions: - queue: - merge_method: merge + merge: + method: merge - name: ask to resolve conflict conditions: - conflict diff --git a/ceph/ceph.py b/ceph/ceph.py index bbc31547c72..0ca8289c28c 100644 --- a/ceph/ceph.py +++ b/ceph/ceph.py @@ -1719,13 +1719,26 @@ def reconnect(self): def __getstate__(self): d = dict(self.__dict__) - del d["vm_node"] - del d["rssh"] - del d["ssh"] - del d["rssh_transport"] - del d["ssh_transport"] - del d["root_connection"] - del d["connection"] + if d.get("vm_node"): + del d["vm_node"] + + if d.get("rssh"): + del d["rssh"] + + if d.get("ssh"): + del d["ssh"] + + if d.get("rssh_transport"): + del d["rssh_transport"] + + if d.get("ssh_transport"): + del d["ssh_transport"] + + if d.get("ssh_transport"): + del d["root_connection"] + + if d.get("connection"): + del d["connection"] return d diff --git a/ceph/parallel.py b/ceph/parallel.py index 7e6cb10fe55..42c8b9861e4 100644 --- a/ceph/parallel.py +++ b/ceph/parallel.py @@ -39,12 +39,14 @@ If one of the spawned functions throws an exception, it will be thrown when iterating over the results, or when the with block ends. -At the end of the with block, the main thread waits until all -spawned functions have completed, or, if one exited with an exception, -kills the rest and raises the exception. +When the scope of with block changes, the main thread waits until all +spawned functions have completed within the given timeout. On timeout, +all pending threads/processes are issued shutdown command. """ import logging -from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor, as_completed +from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor +from datetime import datetime, timedelta +from time import sleep logger = logging.getLogger(__name__) @@ -56,7 +58,6 @@ def __init__( self, thread_pool=True, timeout=None, - shutdown_wait=True, shutdown_cancel_pending=False, ): """Object initialization method. @@ -64,16 +65,22 @@ def __init__( Args: thread_pool (bool) Whether to use threads or processes. timeout (int | float) Maximum allowed time. - shutdown_wait (bool) If disabled, it would not wait for executing - threads/process to complete. shutdown_cancel_pending (bool) If enabled, it would cancel pending tasks. """ self._executor = ThreadPoolExecutor() if thread_pool else ProcessPoolExecutor() self._timeout = timeout - self._shutdown_wait = shutdown_wait self._cancel_pending = shutdown_cancel_pending self._futures = list() self._results = list() + self._iter_index = 0 + + @property + def count(self): + return len(self._futures) + + @property + def results(self): + return self._results def spawn(self, fun, *args, **kwargs): """Triggers the first class method. @@ -93,30 +100,56 @@ def __enter__(self): return self def __exit__(self, exc_type, exc_value, trackback): - _exceptions = [] - exception_count = 0 - - for _f in as_completed(self._futures, timeout=self._timeout): - try: + _not_done = self._futures[:] + _end_time = datetime.now() + timedelta( + seconds=self._timeout if self._timeout else 3600 + ) + + # Wait for all futures to complete within the given time or 1 hour. 
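+        # (Descriptive note on the loop below:) Poll the outstanding futures
+        # every two seconds and drop the ones that finish; whatever is still
+        # pending when the deadline expires is handed to executor.shutdown()
+        # below, which cancels queued work only if shutdown_cancel_pending
+        # was set at construction time.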
+ while datetime.now() < _end_time: + # if the list is empty break + if len(_not_done) == 0: + break + + sleep(2.0) + for _f in _not_done: + if _f.done(): + _not_done.remove(_f) + + # Graceful shutdown of running threads + if _not_done: + self._executor.shutdown(wait=False, cancel_futures=self._cancel_pending) + + if exc_value is not None: + logger.exception(trackback) + return False + + # Check for any exceptions and raise + # At this point, all threads/processes should have completed or cancelled + try: + for _f in self._futures: self._results.append(_f.result()) - except Exception as e: - logger.exception(e) - _exceptions.append(e) - exception_count += 1 - - if exception_count > 0 and not self._shutdown_wait: - # At this point we are ignoring results - self._executor.shutdown(wait=False, cancel_futures=self._cancel_pending) - raise _exceptions[0] - - if len(_exceptions) > 0: - raise _exceptions[0] + except Exception: + logger.exception("Encountered an exception during parallel execution.") + raise - return False if exception_count == 0 else True + return True def __iter__(self): return self def __next__(self): - for r in self._results: - yield r + if self.count == 0 or self._iter_index == self.count: + self._iter_index = 0 # reset the counter + raise StopIteration() + + try: + # Keeping timeout consistent when called within the context + _timeout = self._timeout if self._timeout else 3600 + out = self._futures[self._iter_index].result(timeout=_timeout) + except Exception as e: + logger.exception(e) + out = e + + self._iter_index += 1 + return out diff --git a/cli/ceph/fs/sub_volume.py b/cli/ceph/fs/sub_volume.py index c2cb906ca41..610e9317cd8 100644 --- a/cli/ceph/fs/sub_volume.py +++ b/cli/ceph/fs/sub_volume.py @@ -83,3 +83,43 @@ def getpath(self, volume, subvolume, **kwargs): if isinstance(out, tuple): return out[0].strip() return out + + def set_subvolume_earmark(self, volume, subvolume, earmark, **kwargs): + """ + Sets an earmark to the subvolume + Args: + volume (str): Name of vol where subvol is present + subvolume (str): subvol name + earmark (str): earmark name + """ + cmd = f"{self.base_cmd} earmark set {volume} {subvolume} --earmark {earmark} {build_cmd_from_args(**kwargs)}" + out = self.execute(sudo=True, cmd=cmd) + if isinstance(out, tuple): + return out[0].strip() + return out + + def get_subvolume_earmark(self, volume, subvolume, **kwargs): + """ + Gets earmark from subvolume, if earmark is already present + Args: + volume (str): Name of vol where subvol is present + subvolume (str): subvol name + """ + cmd = f"{self.base_cmd} earmark get {volume} {subvolume} {build_cmd_from_args(**kwargs)}" + out = self.execute(sudo=True, cmd=cmd) + if isinstance(out, tuple): + return out[0].strip() + return out + + def remove_subvolume_earmark(self, volume, subvolume, **kwargs): + """ + Remove the earmark from subvolume + Args: + volume (str): Name of vol where subvol is present + subvolume (str): subvol name + """ + cmd = f"{self.base_cmd} earmark rm {volume} {subvolume} {build_cmd_from_args(**kwargs)}" + out = self.execute(sudo=True, cmd=cmd) + if isinstance(out, tuple): + return out[0].strip() + return out diff --git a/conf/squid/common/3node-1client.yaml b/conf/squid/common/3node-1client.yaml new file mode 100644 index 00000000000..da34d00bce7 --- /dev/null +++ b/conf/squid/common/3node-1client.yaml @@ -0,0 +1,43 @@ +globals: + - ceph-cluster: + name: ceph + node1: + role: + - _admin + - installer + - mon + - mgr + - osd + - crash + - grafana + - prometheus + - node-exporter + - 
alertmanager + no-of-volumes: 6 + disk-size: 20 + node2: + role: + - mon + - mgr + - mds + - osd + - rgw + - crash + - node-exporter + - alertmanager + no-of-volumes: 6 + disk-size: 20 + node3: + role: + - mon + - mgr + - mds + - osd + - rgw + - crash + - node-exporter + no-of-volumes: 6 + disk-size: 20 + node4: + role: + - client diff --git a/conf/squid/common/5node-2client-rh.yaml b/conf/squid/common/5node-2client-rh.yaml new file mode 100644 index 00000000000..ef5e97b2b4b --- /dev/null +++ b/conf/squid/common/5node-2client-rh.yaml @@ -0,0 +1,58 @@ +globals: + - ceph-cluster: + name: ceph + node1: + role: + - _admin + - installer + - mon + - mgr + - crash + - grafana + - prometheus + - alertmanager + node2: + role: + - mon + - mgr + - osd + - rgw + - nfs + - crash + no-of-volumes: 6 + disk-size: 40 + node3: + role: + - mon + - mgr + - osd + - rgw + - crash + no-of-volumes: 6 + disk-size: 40 + node4: + role: + - mon + - mgr + - mds + - osd + - crash + no-of-volumes: 6 + disk-size: 40 + node5: + role: + - mon + - mgr + - mds + - osd + - crash + no-of-volumes: 6 + disk-size: 40 + node6: + id: node6 + role: + - client + node7: + id: node7 + role: + - client diff --git a/conf/squid/common/7node-2client-ibm.yaml b/conf/squid/common/7node-2client-ibm.yaml new file mode 100644 index 00000000000..57243204b77 --- /dev/null +++ b/conf/squid/common/7node-2client-ibm.yaml @@ -0,0 +1,70 @@ +globals: + - ceph-cluster: + name: ceph + node1: + role: + - _admin + - installer + - mon + - mgr + - crash + - grafana + - prometheus + - alertmanager + node2: + role: + - mon + - mgr + - osd + - rgw + - nfs + - crash + no-of-volumes: 6 + disk-size: 40 + node3: + role: + - mon + - mgr + - osd + - rgw + - crash + no-of-volumes: 6 + disk-size: 40 + node4: + role: + - mon + - mgr + - mds + - osd + - crash + no-of-volumes: 6 + disk-size: 40 + node5: + role: + - mon + - mgr + - mds + - osd + - crash + no-of-volumes: 6 + disk-size: 40 + node6: + role: + - mon + - mgr + - crash + - nvmeof-gw + node7: + role: + - mon + - mgr + - crash + - nvmeof-gw + node8: + id: node8 + role: + - client + node9: + id: node9 + role: + - client diff --git a/suites/reef/cephfs/tier-4_cephfs_recovery.yaml b/suites/reef/cephfs/tier-4_cephfs_recovery.yaml index 5db136b9368..3614b6c1afc 100644 --- a/suites/reef/cephfs/tier-4_cephfs_recovery.yaml +++ b/suites/reef/cephfs/tier-4_cephfs_recovery.yaml @@ -233,3 +233,4 @@ tests: module: cephfs_journal_tool.cephfs_journal_tool_event_mode.py name: journal_tool_event_mode polarion-id: "CEPH-83595482" + comments: "BZ 2335321" diff --git a/suites/reef/rgw/tier-1_rgw.yaml b/suites/reef/rgw/tier-1_rgw.yaml index 8d3da3960b4..0a2cb637734 100644 --- a/suites/reef/rgw/tier-1_rgw.yaml +++ b/suites/reef/rgw/tier-1_rgw.yaml @@ -170,6 +170,22 @@ tests: config: script-name: ../s3cmd/test_s3cmd.py config-file-name: ../../s3cmd/configs/test_get_s3cmd.yaml + - test: + name: Header validation of Bucket Lifecycle expiration with date + desc: Test header validation bucket lifecycle expiration with date + polarion-id: CEPH-83573254 + module: sanity_rgw.py + config: + script-name: test_bucket_lifecycle_object_expiration_transition.py + config-file-name: test_lc_date_expire_header.yaml + - test: + name: Header validation of Bucket Lifecycle expiration with days + desc: Test header validation bucket lifecycle expiration with days + polarion-id: CEPH-83573254 + module: sanity_rgw.py + config: + script-name: test_bucket_lifecycle_object_expiration_transition.py + config-file-name: test_lc_days_expire_header.yaml - test: name: 
Indexless buckets desc: Indexless (blind) buckets diff --git a/suites/reef/rgw/tier-2_rgw_regression_extended.yaml b/suites/reef/rgw/tier-2_rgw_regression_extended.yaml index f28fc4f54ae..426a3fc84c6 100644 --- a/suites/reef/rgw/tier-2_rgw_regression_extended.yaml +++ b/suites/reef/rgw/tier-2_rgw_regression_extended.yaml @@ -291,3 +291,12 @@ tests: config: script-name: ../aws/test_acl.py config-file-name: ../../aws/configs/test_public_read_write_acl.yaml + + - test: + name: Test Inditex hotfix bz, CORS presigned put url + desc: CORS ACL's prevents access to buckets with presigned PUT URI's with ACL private header + polarion-id: CEPH-83604475 + module: sanity_rgw.py + config: + script-name: ../curl/test_cors_using_curl.py + config-file-name: ../../curl/configs/test_cors_presigned_put_url_using_curl.yaml diff --git a/suites/reef/rgw/tier-2_rgw_regression_test.yaml b/suites/reef/rgw/tier-2_rgw_regression_test.yaml index bd3c01d1333..3e7cb5319ec 100644 --- a/suites/reef/rgw/tier-2_rgw_regression_test.yaml +++ b/suites/reef/rgw/tier-2_rgw_regression_test.yaml @@ -609,3 +609,14 @@ tests: run-on-rgw: true script-name: ../curl/test_cors_using_curl.py config-file-name: ../../curl/configs/test_crlf_injection_curl.yaml + + - test: + name: LC process with versioning suspended bucket + desc: LC process with versioning suspended bucket + polarion-id: CEPH-83574809 + module: sanity_rgw.py + comments: Known issue BZ-2336868(targeted to 7.1z3) + config: + test-version: v2 + script-name: test_bucket_lifecycle_object_expiration_transition.py + config-file-name: test_lc_process_with_versioning_suspended.yaml diff --git a/suites/squid/cephfs/tier-2_file-dir-lay_vol-mgmt_nfs.yaml b/suites/squid/cephfs/tier-2_file-dir-lay_vol-mgmt_nfs.yaml index e2465b896ad..99b6f2a6805 100644 --- a/suites/squid/cephfs/tier-2_file-dir-lay_vol-mgmt_nfs.yaml +++ b/suites/squid/cephfs/tier-2_file-dir-lay_vol-mgmt_nfs.yaml @@ -325,6 +325,24 @@ tests: polarion-id: CEPH-83604070 desc: Test fs volume creation, run IO & deletion in loop for 5 times abort-on-fail: false + - test: + name: volume related scenarios(delete,rename) + module: cephfs_vol_management.cephfs_vol_mgmt_volume_scenarios.py + polarion-id: CEPH-83603354 + desc: volume related scenarios(delete,rename) + abort-on-fail: false + - test: + name: cephfs subvolumegroup scenarios + module: cephfs_vol_management.cephfs_vol_mgmt_subvolgroup_scenarios.py + polarion-id: CEPH-83604079 + desc: cephfs subvolumegroup scenarios + abort-on-fail: false + - test: + name: cephfs subvolume idempoence earmark + module: cephfs_vol_management.cephfs_vol_mgmt_subvol_idempotence_earmark.py + polarion-id: CEPH-83604184 + desc: cephfs subvolume idempoence earmark + abort-on-fail: false - test: abort-on-fail: true desc: "test cephfs nfs export path" @@ -476,12 +494,6 @@ tests: module: cephfs_nfs.nfs_multiple_export_using_single_conf.py name: "nfs_multiple_export_using_single_conf" polarion-id: "CEPH-83575082" - - test: - name: cephfs subvolume idempoence earmark - module: cephfs_vol_management.cephfs_vol_mgmt_subvol_idempotence_earmark.py - polarion-id: CEPH-83604184 - desc: cephfs subvolume idempoence earmark - abort-on-fail: false - test: name: Basic info validation after volume creation and deletion module: cephfs_vol_management.cephfs_vol_mgmt_test_volume.py diff --git a/suites/squid/cephfs/tier-4_cephfs_recovery.yaml b/suites/squid/cephfs/tier-4_cephfs_recovery.yaml index 5db136b9368..3614b6c1afc 100644 --- a/suites/squid/cephfs/tier-4_cephfs_recovery.yaml +++ 
b/suites/squid/cephfs/tier-4_cephfs_recovery.yaml @@ -233,3 +233,4 @@ tests: module: cephfs_journal_tool.cephfs_journal_tool_event_mode.py name: journal_tool_event_mode polarion-id: "CEPH-83595482" + comments: "BZ 2335321" diff --git a/suites/squid/common/sanity/cephfs.yaml b/suites/squid/common/sanity/cephfs.yaml new file mode 100644 index 00000000000..6e4f2b3f656 --- /dev/null +++ b/suites/squid/common/sanity/cephfs.yaml @@ -0,0 +1,24 @@ +tests: + - test: + name: Test CephFs default values for MDs + desc: Deploy MDs with default values using CephAdm + polarion-id: CEPH-83574284 + module: mds_default_values.py + + - test: + name: Test CephFs basic operations + desc: CephFs basic operations + polarion-id: CEPH-11293 + module: cephfs_basic_tests.py + + - test: + name: Test NFS Ganesha with cephfs + desc: Configure nfs-ganesha on nfs server and IOs operations + polarion-id: CEPH-83574439 + module: nfs-ganesha_basics.py + + - test: + name: Test Increase and Decrease of MDs + desc: MDs deployment using CephAdm and increase/decrease number of MDs + polarion-id: CEPH-83574286 + module: mds_inc_dec.py diff --git a/suites/squid/common/sanity/deploy-and-configure-ibm.yaml b/suites/squid/common/sanity/deploy-and-configure-ibm.yaml new file mode 100644 index 00000000000..377671bfbc5 --- /dev/null +++ b/suites/squid/common/sanity/deploy-and-configure-ibm.yaml @@ -0,0 +1,124 @@ +tests: + - test: + name: Setup pre-requisites + desc: Setup packages and configuration for cluster deployment + module: install_prereq.py + abort-on-fail: true + + - test: + name: Deploy cluster using cephadm + desc: Bootstrap and deploy services + polarion-id: CEPH-83573713 + module: test_cephadm.py + config: + steps: + - config: + service: cephadm + command: bootstrap + args: + mon-ip: node1 + - config: + service: host + command: add_hosts + args: + attach_ip_address: true + labels: apply-all-labels + - config: + service: osd + command: apply + args: + all-available-devices: true + - config: + service: rgw + command: apply + pos_args: + - rgw.1 + args: + placement: + label: rgw + - config: + command: shell + args: + - "ceph fs volume create cephfs" + - config: + service: mds + command: apply + args: + placement: + label: mds + base_cmd_args: + verbose: true + pos_args: + - cephfs + - config: + command: shell + args: + - "ceph osd pool create rbd" + - config: + command: shell + args: + - "rbd pool init rbd" + - config: + command: shell + args: + - "ceph osd pool create rbd-gw" + - config: + command: shell + args: + - "rbd pool init rbd-gw" + - config: + command: shell + args: + - "rbd create -s 1G image1" + - config: + command: apply + service: nvmeof + args: + placement: + label: nvmeof-gw + pos_args: + - rbd-gw + - gw_group1 + abort-on-fail: true + + - test: + name: Configure client + desc: Configure the RGW & RBD clients + module: test_client.py + config: + command: add + id: client.1 + node: node8 + install_packages: + - ceph-common + - rbd-nbd + - jq + - fio + copy_admin_keyring: true + caps: + mon: "allow *" + osd: "allow *" + mds: "allow *" + mgr: "allow *" + abort-on-fail: true + + - test: + name: Configure client + desc: Configure the RGW & RBD clients + module: test_client.py + config: + command: add + id: client.2 + node: node9 + install_packages: + - ceph-common + - rbd-nbd + - jq + - fio + copy_admin_keyring: true + caps: + mon: "allow *" + osd: "allow *" + mds: "allow *" + mgr: "allow *" + abort-on-fail: true diff --git a/suites/squid/common/sanity/deploy-and-configure-rh.yaml 
b/suites/squid/common/sanity/deploy-and-configure-rh.yaml new file mode 100644 index 00000000000..b27db9901d4 --- /dev/null +++ b/suites/squid/common/sanity/deploy-and-configure-rh.yaml @@ -0,0 +1,103 @@ +tests: + - test: + name: Setup pre-requisites + desc: Setup packages and configuration for cluster deployment + module: install_prereq.py + abort-on-fail: true + + - test: + name: Deploy cluster using cephadm + desc: Bootstrap and deploy services + polarion-id: CEPH-83573713 + module: test_cephadm.py + config: + steps: + - config: + service: cephadm + command: bootstrap + args: + mon-ip: node1 + - config: + service: host + command: add_hosts + args: + attach_ip_address: true + labels: apply-all-labels + - config: + service: osd + command: apply + args: + all-available-devices: true + - config: + service: rgw + command: apply + pos_args: + - rgw.1 + args: + placement: + label: rgw + - config: + command: shell + args: + - "ceph fs volume create cephfs" + - config: + service: mds + command: apply + args: + placement: + label: mds + base_cmd_args: + verbose: true + pos_args: + - cephfs + - config: + command: shell + args: + - "ceph osd pool create rbd" + - config: + command: shell + args: + - "rbd pool init rbd" + abort-on-fail: true + + - test: + name: Configure client + desc: Configure the RGW & RBD clients + module: test_client.py + config: + command: add + id: client.1 + node: node6 + install_packages: + - ceph-common + - rbd-nbd + - jq + - fio + copy_admin_keyring: true + caps: + mon: "allow *" + osd: "allow *" + mds: "allow *" + mgr: "allow *" + abort-on-fail: true + + - test: + name: Configure client + desc: Configure the RGW & RBD clients + module: test_client.py + config: + command: add + id: client.2 + node: node7 + install_packages: + - ceph-common + - rbd-nbd + - jq + - fio + copy_admin_keyring: true + caps: + mon: "allow *" + osd: "allow *" + mds: "allow *" + mgr: "allow *" + abort-on-fail: true diff --git a/suites/squid/common/sanity/nfs.yaml b/suites/squid/common/sanity/nfs.yaml new file mode 100644 index 00000000000..08f23410270 --- /dev/null +++ b/suites/squid/common/sanity/nfs.yaml @@ -0,0 +1,309 @@ +tests: + + - test: + name: Nfs Ganesha File Lock + module: nfs_verify_file_lock.py + desc: Perform locking on same file from 2 different clients + polarion-id: CEPH-83577561 + abort-on-fail: false + config: + nfs_version: 4.1 + clients: 2 + + - test: + name: Nfs Ganesha Pynfs + module: nfs_verify_pynfs.py + desc: Perform pynfs tests on the Nfs cluster + polarion-id: CEPH-83577559 + abort-on-fail: false + config: + nfs_version: 4.1 + clients: 2 + + - test: + name: Nfs export Readonly parameter + module: test_export_readonly.py + desc: Test NFS export with Readonly + polarion-id: CEPH-83578392 + abort-on-fail: false + config: + nfs_version: 4.1 + clients: 1 + + - test: + name: Nfs export rootsquash permission + module: test_export_rootsquash.py + desc: Test NFS export with rootsquash + polarion-id: CEPH-83578393 + abort-on-fail: false + config: + nfs_version: 4.1 + clients: 1 + + - test: + name: NFS-Ganesha export config to mount with RO + desc: Verify edit export config to mount with RO + module: nfs_edit_export_config_with_ro.py + polarion-id: CEPH-83578395 + abort-on-fail: false + config: + nfs_version: 4.1 + clients: 1 + + - test: + name: NFS-Ganesha export config to mount with 1 client access + desc: Verify edit the export config to mount with allow access to only 1 client + module: nfs_edit_export_config_with_1client_access.py + polarion-id: CEPH-83578397 + abort-on-fail: 
false + config: + nfs_version: 4.1 + clients: 2 + +# ToDo: Needs to revisit the test steps. Moving it out of ci runs till test steps verified +# - test: +# name: Nfs access file from with root-squash enabled +# module: nfs_verify_file_access_with_rootsquash.py +# desc: Test NFS file access with root-squash enabled +# polarion-id: CEPH-83577604 +# abort-on-fail: false +# config: +# nfs_version: 4.1 +# clients: 4 + + - test: + name: NFS-Ganesha test cmount_path param in export file + desc: Verify cmount_path entry in export file + module: nfs_validate_cmount_path_export_conf.py + polarion-id: CEPH-83593816 + abort-on-fail: false + config: + nfs_version: 4.1 + clients: 1 + + - test: + name: Nfs-Ganesha test export creation from multiple cephfs filesystem + module: nfs_test_multiple_filesystem_exports.py + desc: Nfs-Ganesha create multiple exports mapped to multiple cephfs filesystem + polarion-id: CEPH-83595738 + abort-on-fail: false + config: + nfs_version: 4.1 + clients: 1 + + - test: + name: Nfs Ganesha File Lock + module: nfs_verify_file_lock.py + desc: Perform locking on same file from 2 different clients + polarion-id: CEPH-83577561 + abort-on-fail: false + config: + nfs_version: 3 + clients: 2 + + - test: + name: Nfs export Readonly parameter + module: test_export_readonly.py + desc: Test NFS export with Readonly + polarion-id: CEPH-83578392 + abort-on-fail: false + config: + nfs_version: 3 + clients: 1 + + - test: + name: Nfs export rootsquash permission + module: test_export_rootsquash.py + desc: Test NFS export with rootsquash + polarion-id: CEPH-83578393 + abort-on-fail: false + config: + nfs_version: 3 + clients: 1 + + - test: + name: NFS-Ganesha export config to mount with RO + desc: Verify edit export config to mount with RO + module: nfs_edit_export_config_with_ro.py + polarion-id: CEPH-83578395 + abort-on-fail: false + config: + nfs_version: 3 + clients: 1 + + - test: + name: NFS-Ganesha export config to mount with 1 client access + desc: Verify edit the export config to mount with allow access to only 1 client + module: nfs_edit_export_config_with_1client_access.py + polarion-id: CEPH-83578397 + abort-on-fail: false + config: + nfs_version: 3 + clients: 2 + + - test: + name: NFS-Ganesha test cmount_path param in export file + desc: Verify cmount_path entry in export file + module: nfs_validate_cmount_path_export_conf.py + polarion-id: CEPH-83593816 + abort-on-fail: false + config: + nfs_version: 3 + clients: 1 + + - test: + name: Nfs Ganesha File Lock + module: nfs_verify_file_lock.py + desc: Perform locking on same file from 2 different clients + polarion-id: CEPH-83577561 + abort-on-fail: false + config: + nfs_version: 4.2 + clients: 2 + +# Skipping the test for failure analysis +# - test: +# name: Nfs Verify File Operations +# module: nfs_verify_file_operations.py +# desc: Change the ownership (user and group) of files within the NFS export +# polarion-id: CEPH-83577601 +# abort-on-fail: false +# config: +# nfs_version: 4.2 +# clients: 3 +# file_count: 100 +# operations: +# client01 : create_files +# client02 : change_ownership +# client03 : perform_lookups + + - test: + name: Nfs export Readonly parameter + module: test_export_readonly.py + desc: Test NFS export with Readonly + polarion-id: CEPH-83578392 + abort-on-fail: false + config: + nfs_version: 4.2 + clients: 1 + + - test: + name: Nfs export rootsquash permission + module: test_export_rootsquash.py + desc: Test NFS export with rootsquash + polarion-id: CEPH-83578393 + abort-on-fail: false + config: + nfs_version: 
4.2 + clients: 1 + + - test: + name: NFS-Ganesha export config to mount with RO + desc: Verify edit export config to mount with RO + module: nfs_edit_export_config_with_ro.py + polarion-id: CEPH-83578395 + abort-on-fail: false + config: + nfs_version: 4.2 + clients: 1 + + - test: + name: NFS-Ganesha export config to mount with 1 client access + desc: Verify edit the export config to mount with allow access to only 1 client + module: nfs_edit_export_config_with_1client_access.py + polarion-id: CEPH-83578397 + abort-on-fail: false + config: + nfs_version: 4.2 + clients: 2 + +# ToDo: Needs to revisit the test steps. Moving it out of ci runs till test steps verified +# - test: +# name: Nfs access file from with root-squash enabled +# module: nfs_verify_file_access_with_rootsquash.py +# desc: Test NFS file access with root-squash enabled +# polarion-id: CEPH-83577604 +# abort-on-fail: false +# config: +# nfs_version: 4.2 +# clients: 4 + + - test: + name: NFS-Ganesha test setfattr and getfattr operations + desc: Verify setting and fetching the extended attributes from NFS + module: test_nfs_get_set_attr_operation.py + polarion-id: CEPH-83591737 + abort-on-fail: false + config: + nfs_version: 4.2 + clients: 1 + + - test: + name: Nfs verify setting and deleting multiple xttar on file + module: nfs_multiple_xattr_set_and_delete.py + desc: Test setting up and deleting multiple xattr on single file + polarion-id: CEPH-83591993, CEPH-83591994 + abort-on-fail: false + config: + nfs_version: 4.2 + clients: 1 + + - test: + name: Nfs verify extended attributes on readonly file + module: nfs_verify_xattr_readonly_file.py + desc: Test the extended attributes on the readonly file + polarion-id: CEPH-83592397 + abort-on-fail: false + config: + nfs_version: 4.2 + clients: 1 + + - test: + name: NFS-Ganesha test setting selinux context on file + desc: Verify selinux label is set on NFS mount + module: nfs_test_setting_selinux_context.py + polarion-id: CEPH-83593098 + abort-on-fail: false + config: + nfs_version: 4.2 + clients: 2 + + - test: + name: NFS-Ganesha test selinux lable with move operation + desc: Verify selinux label is preserved when file is moved + module: nfs_test_selinux_context_moving_files.py + polarion-id: CEPH-83593099 + abort-on-fail: false + config: + nfs_version: 4.2 + clients: 2 + + - test: + name: NFS-Ganesha test selinux context set via mount command + desc: Verify selinux label set directly on NFS mount point + module: test_nfs_selinux_label_while_mounting.py + polarion-id: CEPH-83593152 + abort-on-fail: false + config: + nfs_version: 4.2 + clients: 1 + + - test: + name: NFS-Ganesha test cmount_path param in export file + desc: Verify cmount_path entry in export file + module: nfs_validate_cmount_path_export_conf.py + polarion-id: CEPH-83593816 + abort-on-fail: false + config: + nfs_version: 4.2 + clients: 1 + + - test: + name: Nfs Ganesha test multi mount versions - 4.1 and 4.2 simultaneous mount + module: nfs_multi_mount_version_4x.py + desc: Perform mount with 4.1 and 4.2 versions on multiple client + polarion-id: CEPH-83597604 + abort-on-fail: true + config: + clients: 2 + nfs_version: [4.1: 1, 4.2: 1] diff --git a/suites/squid/common/sanity/nvmeof.yaml b/suites/squid/common/sanity/nvmeof.yaml new file mode 100644 index 00000000000..5e0dc255f6b --- /dev/null +++ b/suites/squid/common/sanity/nvmeof.yaml @@ -0,0 +1,140 @@ +tests: + - test: + name: Basic E2ETest Ceph NVMEoF GW sanity test on dedicated node + desc: E2E-Test NVMEoF Gateway on dedicated node and Run IOs on targets + 
polarion-id: CEPH-83575441 + module: test_ceph_nvmeof_gateway.py + config: + gw_node: node6 + rbd_pool: rbd-gw + gw_group: gw_group1 + do_not_create_image: true + rep-pool-only: true + rep_pool_config: + pool: rbd-gw + install: true # Run SPDK with all pre-requisites + cleanup: + - subsystems + - initiators + - pool + - gateway + subsystems: # Configure subsystems with all sub-entities + - nqn: nqn.2016-06.io.spdk:cnode1 + serial: 1 + bdevs: + count: 1 + size: 10G + listener_port: 5001 + allow_host: "*" + initiators: # Configure Initiators with all pre-req + - subnqn: nqn.2016-06.io.spdk:cnode1 + listener_port: 5001 + node: node9 + abort-on-fail: true + + # Configure Ceph NVMeoF gateway + # Configure Initiators + # Run IO on NVMe Targets + - test: + name: NVMeoF 2-GW HA failover-failback via systemctl + desc: NVMEoF failover-failback test via systemctl rpm tool + polarion-id: CEPH-83588636 + module: test_ceph_nvmeof_high_availability.py + config: + rbd_pool: rbd + gw_group: gw_group1 + do_not_create_image: true + rep-pool-only: true + rep_pool_config: + pool: rbd + install: true # Run SPDK with all pre-requisites + cleanup: + - pool + - gateway + - initiators + gw_nodes: + - node6 + - node7 + subsystems: # Configure subsystems with all sub-entities + - nqn: nqn.2016-06.io.spdk:cnode1 + serial: 1 + bdevs: + - count: 2 + size: 2G + lb_group: node6 + listener_port: 4420 + listeners: + - node6 + - node7 + allow_host: "*" + - nqn: nqn.2016-06.io.spdk:cnode2 + serial: 2 + bdevs: + - count: 2 + size: 2G + lb_group: node7 + listener_port: 4420 + listeners: + - node6 + - node7 + allow_host: "*" + initiators: # Configure Initiators with all pre-req + - nqn: connect-all + listener_port: 4420 + node: node8 + fault-injection-methods: # Failure induction + - tool: systemctl + nodes: node6 + abort-on-fail: true + + - test: + name: NVMeoF 2-GW HA failover-failback via orchestrator + desc: NVMEoF failover-failback test via cephadm daemon orchestration + polarion-id: CEPH-83588699 + module: test_ceph_nvmeof_high_availability.py + config: + rbd_pool: rbd + gw_group: gw_group1 + do_not_create_image: true + rep-pool-only: true + rep_pool_config: + pool: rbd + install: true # Run SPDK with all pre-requisites + cleanup: + - pool + - gateway + - initiators + gw_nodes: + - node6 + - node7 + subsystems: # Configure subsystems with all sub-entities + - nqn: nqn.2016-06.io.spdk:cnode1 + serial: 1 + bdevs: + - count: 2 + size: 2G + lb_group: node6 + listener_port: 4420 + listeners: + - node6 + - node7 + allow_host: "*" + - nqn: nqn.2016-06.io.spdk:cnode2 + serial: 2 + bdevs: + - count: 2 + size: 2G + lb_group: node7 + listener_port: 4420 + listeners: + - node6 + - node7 + allow_host: "*" + initiators: # Configure Initiators with all pre-req + - nqn: connect-all + listener_port: 4420 + node: node8 + fault-injection-methods: # Failure induction + - tool: daemon + nodes: node6 + abort-on-fail: true diff --git a/suites/squid/common/sanity/rados.yaml b/suites/squid/common/sanity/rados.yaml new file mode 100644 index 00000000000..1f721d8982d --- /dev/null +++ b/suites/squid/common/sanity/rados.yaml @@ -0,0 +1,312 @@ +# Suite contains basic tier-2 rados tests +tests: + + - test: + name: Configure email alerts + desc: Configure email alerts on ceph cluster + polarion-id: CEPH-83574472 + module: rados_prep.py + config: + email_alerts: + smtp_host: smtp.corp.redhat.com + smtp_sender: ceph-iad2-c01-lab.mgr@redhat.com + smtp_port: 25 + interval: 10 + smtp_destination: + - pdhiran@redhat.com + smtp_from_name: Rados Sanity Cluster 
Alerts + + - test: + name: Test configuration Assimilation + desc: Verify config assimilation into ceph mon configuration database + polarion-id: CEPH-83573480 + comments: RFE - 2277761 + module: test_config_assimilation.py + config: + cluster_conf_path: "conf/squid/rados/test-confs/cluster-configs" + Verify_config_parameters: + configurations: + - config-1: + section: "mon" + name: "mon_cluster_log_to_syslog" + value: "true" + - config-2: + section: "osd" + name: "debug_osd" + value: "5/5" + - config-3: + section: "mgr" + name: "mgr_stats_period" + value: "10" + - config-4: + section: "mgr" + name: "debug_mgr" + value: "5/5" + - config-5: + section: "mds" + name: "mds_op_history_size" + value: "40" + + - test: + name: Enable logging to file + desc: Change config options to enable logging to file + module: rados_prep.py + config: + log_to_file: true + +# The below test is openstack only, and would need modifications to run on BM. +# commenting the run of below test in BM pipeline + - test: + name: Verify osd heartbeat no reply + desc: heartbeat_check log entries should contain hostname:port + polarion-id: CEPH-10839 + module: test_osd_heartbeat.py + destroy-cluster: false + + - test: + name: Monitor configuration - section and masks changes + desc: Verify config changes for section & masks like device class, host etc + module: rados_prep.py + polarion-id: CEPH-83573477 + config: + Verify_config_parameters: + configurations: + - config-1: + section: "osd" + name: "osd_max_backfills" + value: "8" + location_type: "class" + location_value: "hdd" + - config-2: + section: "osd" + name: "osd_recovery_max_active" + value: "8" + location_type: "host" + location_value: "host" + - config-3: + section: "global" + name: "debug_mgr" + value: "10/10" + - config-4: + section: "osd" + name: "osd_max_scrubs" + value: "5" + - config-5: + section: "osd.1" + name: "osd_max_scrubs" + value: "3" + - config-6: + section: "mds" + name: "mds_op_history_size" + value: "40" + - config-7: + section: "client.rgw" + name: "rgw_lc_debug_interval" + value: "1" + - config-8: + section: "global" + name: "debug_mgr" + value: "10/10" + - config-9: + section: "osd.2" + name: "debug_ms" + value: "10/10" + + - test: + name: Monitor configuration - msgrv2 compression modes + desc: Verify the health status of the cluster by randomly changing the compression configuration values + polarion-id: CEPH-83574961 + module: rados_prep.py + config: + Verify_config_parameters: + configurations: + - config-1: + section: "mon" + name: "ms_osd_compress_mode" + value: "force" + - config-2: + section: "mon" + name: "ms_osd_compress_min_size" + value: "512" + - config-3: + section: "mon" + name: "ms_osd_compress_mode" + value: "none" + - config-4: + section: "mon" + name: "ms_osd_compress_min_size" + value: "1024" + + - test: + name: Replicated pool LC + desc: Create replicated pools and run IO + polarion-id: CEPH-83571632 + module: rados_prep.py + config: + replicated_pool: + create: true + pool_name: test_re_pool + pg_num: 16 + size: 2 + disable_pg_autoscale: true + rados_write_duration: 50 + rados_read_duration: 30 + set_pool_configs: + pool_name: test_re_pool + configurations: + pg_num: 32 + pgp_num: 32 + pg_autoscale_mode: 'on' + compression_mode: aggressive + compression_algorithm: zlib + delete_pools: + - test_re_pool + + - test: + name: Compression algorithms + desc: Enable/disable different compression algorithms. 
+ polarion-id: CEPH-83571669 + module: rados_prep.py + config: + replicated_pool: + create: true + pool_name: re_pool_compress + pg_num: 32 + rados_write_duration: 10 + rados_read_duration: 10 + enable_compression: + pool_name: re_pool_compress + rados_write_duration: 50 + rados_read_duration: 50 + configurations: + - config-1: + compression_mode: force + compression_algorithm: snappy + compression_required_ratio: 0.3 + compression_min_blob_size: 1B + byte_size: 10KB + - config-2: + compression_mode: passive + compression_algorithm: zlib + compression_required_ratio: 0.7 + compression_min_blob_size: 10B + byte_size: 100KB + - config-3: + compression_mode: aggressive + compression_algorithm: zstd + compression_required_ratio: 0.5 + compression_min_blob_size: 1KB + byte_size: 100KB + delete_pools: + - re_pool_compress + + - test: + name: Ceph balancer plugin + desc: Ceph balancer plugins CLI validation in crush-compat mode + polarion-id: CEPH-83573247 + module: rados_prep.py + config: + configure_balancer: + configure: true + balancer_mode: crush-compat + target_max_misplaced_ratio: 0.04 + sleep_interval: 30 + + - test: + name: Ceph balancer test + desc: Ceph balancer plugins CLI validation in upmap mode + polarion-id: CEPH-83573251 + module: rados_prep.py + config: + configure_balancer: + configure: true + balancer_mode: upmap + target_max_misplaced_ratio: 0.05 + sleep_interval: 60 + + - test: + name: Ceph PG Autoscaler + desc: Ceph PG autoscaler CLI validation + polarion-id: CEPH-83573412 + module: rados_prep.py + config: + replicated_pool: + create: true + pool_name: rep_test_pool + rados_write_duration: 50 + rados_read_duration: 50 + pg_num: 32 + configure_pg_autoscaler: + default_mode: warn + mon_target_pg_per_osd: 128 + pool_config: + pool_name: rep_test_pool + pg_autoscale_mode: "on" + pg_num_min: 16 + target_size_ratio: 0.4 + delete_pools: + - rep_test_pool + + - test: + name: Config checks + desc: Enable Cluster Configuration checks + polarion-id: CEPH-83574529 + module: rados_prep.py + config: + cluster_configuration_checks: + configure: true + disable_check_list: + - osd_mtu_size + - osd_linkspeed + - kernel_security + enable_check_list: + - kernel_security + - osd_linkspeed + + - test: + name: config source changes log + desc: Config sources - Verify config source changes in the log + polarion-id: CEPH-83573479 + module: test_mon_config_history.py + + - test: + name: config source changes reset + desc: Config sources - Verify config source changes and reset config + polarion-id: CEPH-83573478 + module: test_mon_config_reset.py + +# Commenting until bug fix : https://bugzilla.redhat.com/show_bug.cgi?id=2252788 +# Other issues with noautoscale flag listed here : https://bugzilla.redhat.com/show_bug.cgi?id=2283358 + - test: + name: autoscaler flags + desc: verify autoscaler flags functionality + comments: active Bugs 2252788, 2283358 + polarion-id: CEPH-83574794 + module: test_pg_autoscale_flag.py + config: + pool_configs_path: "conf/squid/rados/test-confs/pool-configurations.yaml" + create_ec_pool: true + create_re_pool: true + + - test: + name: Mon election strategies + desc: Change Mon election strategies and verify status + polarion-id: CEPH-83573627 + module: test_election_strategies.py + + - test: + name: EC Profile tests + desc: Verify the behaviour of EC profiles in ceph + polarion-id: CEPH-83596295 + module: pool_tests.py + config: + Verify_ec_profile: + name: test + profile_name: test_profile + pool_name: ec_profile_test + k: 2 + m: 2 + plugin: jerasure + 
rados_write_duration: 100 + rados_read_duration: 50 + crush-failure-domain: host diff --git a/suites/squid/common/sanity/rbd.yaml b/suites/squid/common/sanity/rbd.yaml new file mode 100644 index 00000000000..e7ae02aa887 --- /dev/null +++ b/suites/squid/common/sanity/rbd.yaml @@ -0,0 +1,239 @@ +tests: + +# Tests from - suites/squid/rbd/tier-1_rbd.yaml + + - test: + name: Install rbd-nbd + desc: Install rbd-nbd and remove any epel packages + module: exec.py + config: + sudo: true + commands: + - "rm -rf /etc/yum.repos.d/epel*" + - "dnf install rbd-nbd -y" + + - test: + name: 1_rbd_cli_groups + desc: "Executing upstream RBD CLI Groups scenarios" + polarion-id: CEPH-83574239 + module: test_rbd.py + config: + script: rbd_groups.sh + script_path: qa/workunits/rbd + + - test: + name: 2_rbd_cli_import_export + desc: "Executing upstream RBD CLI Import Export scenarios" + polarion-id: CEPH-83574240 + module: test_rbd.py + config: + script: import_export.sh + script_path: qa/workunits/rbd + + - test: + name: 5_librbd_python + desc: Executig upstream LibRBD scenarios + polarion-id: CEPH-83574524 + module: test_rbd.py + config: + script_path: qa/workunits/rbd + script: test_librbd_python.sh + + - test: + name: 6_rbd_permissions + desc: Executig upstream RBD permissions scenarios + polarion-id: CEPH-83574525 + module: test_rbd.py + config: + script_path: qa/workunits/rbd + script: permissions.sh + + - test: + name: 7_rbd_read_flags + desc: Executig upstream RBD Read Flag scenarios + polarion-id: CEPH-83574526 + module: test_rbd.py + config: + script_path: qa/workunits/rbd + script: read-flags.sh + + - test: + name: 9_journal + desc: Executig upstream RBD Journal scenarios + polarion-id: CEPH-83574527 + module: test_rbd.py + config: + script_path: qa/workunits/rbd + script: journal.sh + + - test: + name: 10_rbd_kernel + desc: Executig upstream RBD Kernal scenarios + polarion-id: CEPH-83574528 + module: test_rbd.py + config: + script_path: qa/workunits/rbd + script: kernel.sh + + - test: + name: 11_rbd_krbd_exclusive + desc: Executig upstream RBD kernel exclusive scenarios + polarion-id: CEPH-83574531 + module: test_rbd.py + config: + script_path: qa/workunits/rbd + script: krbd_exclusive_option.sh + + - test: + name: test delete clones with io + desc: Create clone of an image and delete while krbd IO is running + polarion-id: CEPH-9225 + module: delete_clones_with_io.py + +# Tests from - suites/squid/rbd/tier-2_rbd_encryption.yaml + + - test: + name: encrypt image and clone using different keys + desc: Encrypt & decrypt file using same keys and different keys + polarion-id: CEPH-83575263 + module: test_rbd_encryption.py + config: + encryption_type: #parent,clone + - luks1,luks1 + + - test: + name: Encrypt image and clone using combinations of encryption type + desc: Apply different combinations of encryption to parent and clone + polarion-id: CEPH-83575251 + module: test_rbd_encryption.py + config: + encryption_type: #parent,clone + - luks1,luks1 + - luks1,NA + - luks2,luks1 + - luks2,luks2 + - luks2,NA + - NA,luks1 + - NA,luks2 + - luks1,luks2 + +# Tests from - suites/squid/rbd/tier-2_rbd_regression.yaml + + - test: + name: snap and clone on imported image + desc: snap and clone operations on imported image + polarion-id: CEPH-9230 + module: rbd_snap_clone_imported_image.py + + - test: + name: Verify exclusive lock feature + desc: Verify Delayed deletion with exclusive feature on RBD image + polarion-id: CEPH-11408 + module: rbd_exclusive_lock_rm_image.py + config: + io-total: 5G + + - test: + name: 
Test for parent image deletion after flattening the clone and removing snap + desc: verify for parent image deletion after flattening the clone and removing snap + polarion-id: CEPH-11409 + module: rbd_clone_delete_parent_image.py + config: + io-total: 5G + + - test: + name: clones creation with v2clone format + desc: Verify that clones creation and deletion of parent image with V2 enabled + polarion-id: CEPH-83573309 + module: rbd_clonev2.py + + - test: + name: parent snap deletion after clone flattening + desc: Verify parent snapshot deletion after flattening the clone + polarion-id: CEPH-83573650 + module: rbd_clone_delete_parent_snapshot.py + + - test: + name: Test Snapshot Rename functionality + desc: Rename image snapshots on an image on replicated and ecpools and its clones + polarion-id: CEPH-9833 + module: rbd_snapshot_rename.py + + - test: + name: Test Snapshot Rename for snapshot with clone + desc: Rename image snapshot from which another image was cloned on a replicated and ecpool + polarion-id: CEPH-9835 + module: rbd_rename_cloned_snapshot.py + + - test: + name: Test Snapshot Rename with clone operations in progress + desc: Rename image snapshot when operations on clone/parent image is in progress + polarion-id: CEPH-9836 + module: rbd_snapshot_rename_advanced.py + + - test: + name: krbd client - Test to Perform Deletion of protected snapshot + desc: Trying to delete a protected snapshot should fail - negative + polarion-id: CEPH-9224 + module: rbd_delete_protected_snapshot_krbd.py + config: + do_not_create_image: True + operations: + map: true + io: true + nounmap: true + + - test: + name: Test to disable image feature when flatten operation is performed + desc: Perform flatten operations while changing the image feature + polarion-id: CEPH-9862 + module: rbd_flatten_image_feature_disable.py + config: + rbd_op_thread_timeout: 120 + + - test: + name: Test to verify image feature disable with image meta on it + desc: Verify image feature disable on image having image meta set on it + polarion-id: CEPH-9864 + module: image_with_metadata_feature_disable.py + config: + image_feature: deep-flatten + + - test: + name: Test to verify image resize operation while changing image feature + desc: Verify image resize while changing image feature + polarion-id: CEPH-9861 + module: rbd_resize_image_with_image_feature.py + config: + image_feature: fast-diff + size_increase: 11G + size_decrease: 5G + rep_pool_config: + size: 10G + ec_pool_config: + size: 10G + + - test: + name: Test to verify data compression on global, pool and image level + desc: Verify rbd_compression_hint config settings + polarion-id: CEPH-83574644 + module: test_rbd_compression.py + config: + compression_algorithm: snappy + compression_mode: passive + compression_ratio: 0.7 + io_total: 1G + + - test: + name: Test image creation, write and read data using python rbd module + desc: Verify python rbd module + polarion-id: CEPH-83574791 + module: test_rbd_python_module.py + config: + do_not_create_image: true + rep_pool_config: + num_pools: 1 + num_images: 1 + ec_pool_config: + num_pools: 1 + num_images: 1 diff --git a/suites/squid/common/sanity/rgw.yaml b/suites/squid/common/sanity/rgw.yaml new file mode 100644 index 00000000000..b6a974ad1be --- /dev/null +++ b/suites/squid/common/sanity/rgw.yaml @@ -0,0 +1,126 @@ + +tests: + +# Tests from - suites/squid/rgw/tier-1_rgw.yaml + + - test: + name: enable bucket versioning + desc: Basic versioning test, also called as test to enable bucket versioning + polarion-id: CEPH-14261 
# also applies to CEPH-9222 and CEPH-10652 + module: sanity_rgw.py + config: + script-name: test_versioning_with_objects.py + config-file-name: test_versioning_objects_enable.yaml + + - test: + name: Parallel run + desc: RGW tier-1 parallelly. + module: test_parallel.py + parallel: + - test: + name: Test M buckets with N objects + desc: test to create "M" no of buckets and "N" no of objects + polarion-id: CEPH-9789 + module: sanity_rgw.py + config: + script-name: test_Mbuckets_with_Nobjects.py + config-file-name: test_Mbuckets_with_Nobjects.yaml + install_common: false + run-on-rgw: true + + - test: + name: Test delete using M buckets with N objects + desc: test to create "M" no of buckets and "N" no of objects with delete + polarion-id: CEPH-14237 + module: sanity_rgw.py + config: + script-name: test_Mbuckets_with_Nobjects.py + config-file-name: test_Mbuckets_with_Nobjects_delete.yaml + install_common: false + run-on-rgw: true + + - test: + name: Test download with M buckets with N objects + desc: test to create "M" no of buckets and "N" no of objects with download + polarion-id: CEPH-14237 + module: sanity_rgw.py + config: + script-name: test_Mbuckets_with_Nobjects.py + config-file-name: test_Mbuckets_with_Nobjects_download.yaml + install_common: false + run-on-rgw: true + + - test: + name: Test multipart upload of M buckets with N objects + desc: test to create "M" no of buckets and "N" no of objects with multipart upload + polarion-id: CEPH-9801 + module: sanity_rgw.py + config: + script-name: test_Mbuckets_with_Nobjects.py + config-file-name: test_Mbuckets_with_Nobjects_multipart.yaml + install_common: false + run-on-rgw: true + + - test: + name: test_bi_purge for a bucket + desc: test bi_purge should not error + polarion-id: CEPH-83575234 + module: sanity_rgw.py + config: + install_common: false + script-name: test_Mbuckets_with_Nobjects.py + config-file-name: test_bi_purge.yaml + + - test: + name: Bucket Lifecycle Object_expiration_tests for non current version expiration + desc: Test object expiration for non current version expiration + polarion-id: CEPH-11190 + module: sanity_rgw.py + config: + script-name: test_bucket_lifecycle_object_expiration_transition.py + config-file-name: test_lc_rule_prefix_non_current_days.yaml + + - test: + name: Bucket Lifecycle Object_transition_tests for Prefix filter and versioned buckets + desc: Test object transition for Prefixand versioned buckets + polarion-id: CEPH-83574050 # also applies to CEPH-83574049, CEPH-83574053 + module: sanity_rgw.py + config: + script-name: test_bucket_lifecycle_object_expiration_transition.py + config-file-name: test_lc_transition_with_prefix_rule.yaml + + - test: + name: S3CMD small and multipart object download + desc: S3CMD small and multipart object download or GET + polarion-id: CEPH-83575477 + module: sanity_rgw.py + config: + script-name: ../s3cmd/test_s3cmd.py + config-file-name: ../../s3cmd/configs/test_get_s3cmd.yaml + + - test: + name: Test GetObjectAttributes with normal objects + desc: Test GetObjectAttributes with normal objects + polarion-id: CEPH-83595849 + module: sanity_rgw.py + config: + script-name: test_Mbuckets_with_Nobjects.py + config-file-name: test_Mbuckets_with_Nobjects_get_object_attributes.yaml + + - test: + name: Test GetObjectAttributes with checksum sha256 + desc: Test GetObjectAttributes with checksum sha256 + polarion-id: CEPH-83595849 + module: sanity_rgw.py + config: + script-name: test_Mbuckets_with_Nobjects.py + config-file-name: 
test_Mbuckets_with_Nobjects_get_object_attributes_checksum_sha256.yaml + + - test: + name: Test GetObjectAttributes with multipart objects + desc: Test GetObjectAttributes with multipart objects + polarion-id: CEPH-83595849 + module: sanity_rgw.py + config: + script-name: test_Mbuckets_with_Nobjects.py + config-file-name: test_Mbuckets_with_Nobjects_get_object_attributes_multipart.yaml diff --git a/suites/squid/common/smoke/bvt.yaml b/suites/squid/common/smoke/bvt.yaml new file mode 100644 index 00000000000..d44d2aca112 --- /dev/null +++ b/suites/squid/common/smoke/bvt.yaml @@ -0,0 +1,56 @@ +tests: + - test: + name: Executes RGW, RBD and FS operations + desc: Run object, block and filesystem basic operations parallelly. + module: test_parallel.py + parallel: + - test: + name: Test M buckets with N objects + desc: test to create "M" no of buckets and "N" no of objects + polarion-id: CEPH-9789 + module: sanity_rgw.py + config: + script-name: test_Mbuckets_with_Nobjects.py + config-file-name: test_Mbuckets_with_Nobjects.yaml + timeout: 300 + + - test: + name: Run RBD tier-0 operations + desc: Run RBD tier-0 operations + polarion-id: CEPH-83575401 + module: rbd_tier0.py + config: + ec-pool-k-m: 2,1 + ec-pool-only: False + ec_pool_config: + pool: rbd_pool + data_pool: rbd_ec_pool + ec_profile: rbd_ec_profile + image: rbd_image + image_thick_provision: rbd_thick_image + snap_thick_provision: rbd_thick_snap + clone_thick_provision: rbd_thick_clone + thick_size: 2G + size: 10G + snap: rbd_ec_pool_snap + clone: rbd_ec_pool_clone + rep_pool_config: + pool: rbd_rep_pool + image: rbd_rep_image + image_thick_provision: rbd_rep_thick_image + snap_thick_provision: rbd_rep_thick_snap + clone_thick_provision: rbd_rep_thick_clone + thick_size: 2G + size: 10G + snap: rbd_rep_pool_snap + clone: rbd_rep_pool_clone + operations: + map: true + io: true + nounmap: false + + - test: + name: cephfs-basics + desc: "cephfs basic operations" + polarion-id: "CEPH-11293,CEPH-11296,CEPH-11297,CEPH-11295" + module: cephfs_basic_tests.py diff --git a/suites/squid/common/smoke/deploy-and-configure.yaml b/suites/squid/common/smoke/deploy-and-configure.yaml new file mode 100644 index 00000000000..0104ebdde12 --- /dev/null +++ b/suites/squid/common/smoke/deploy-and-configure.yaml @@ -0,0 +1,76 @@ +tests: + - test: + name: Setup pre-requisites + desc: Setup packages and configuration for cluster deployment + module: install_prereq.py + abort-on-fail: true + + - test: + name: Deploy cluster using cephadm + desc: Bootstrap and deploy services + polarion-id: CEPH-83573713 + module: test_cephadm.py + config: + steps: + - config: + service: cephadm + command: bootstrap + args: + mon-ip: node1 + - config: + service: host + command: add_hosts + args: + attach_ip_address: true + labels: apply-all-labels + - config: + service: osd + command: apply + args: + all-available-devices: true + - config: + service: rgw + command: apply + pos_args: + - rgw.1 + args: + placement: + label: rgw + - config: + command: shell + args: + - "ceph fs volume create cephfs" + - config: + service: mds + command: apply + args: + placement: + label: mds + base_cmd_args: + verbose: true + pos_args: + - cephfs + - config: + command: shell + args: + - "ceph osd pool create rbd" + - config: + command: shell + args: + - "rbd pool init rbd" + destroy-cluster: false + abort-on-fail: true + + - test: + name: Configure client + desc: Configure the RGW & RBD clients + module: test_client.py + config: + command: add + id: client.1 + node: node4 + install_packages: + - 
ceph-common + copy_admin_keyring: true + destroy-cluster: false + abort-on-fail: true diff --git a/suites/squid/rgw/tier-1_rgw.yaml b/suites/squid/rgw/tier-1_rgw.yaml index 8ec7510c987..003e612eda6 100644 --- a/suites/squid/rgw/tier-1_rgw.yaml +++ b/suites/squid/rgw/tier-1_rgw.yaml @@ -194,6 +194,22 @@ tests: config: script-name: test_Mbuckets_with_Nobjects.py config-file-name: test_Mbuckets_with_Nobjects_get_object_attributes_multipart.yaml + - test: + name: Header validation of Bucket Lifecycle expiration with date + desc: Test header validation bucket lifecycle expiration with date + polarion-id: CEPH-83573254 + module: sanity_rgw.py + config: + script-name: test_bucket_lifecycle_object_expiration_transition.py + config-file-name: test_lc_date_expire_header.yaml + - test: + name: Header validation of Bucket Lifecycle expiration with days + desc: Test header validation bucket lifecycle expiration with days + polarion-id: CEPH-83573254 + module: sanity_rgw.py + config: + script-name: test_bucket_lifecycle_object_expiration_transition.py + config-file-name: test_lc_days_expire_header.yaml - test: name: Indexless buckets desc: Indexless (blind) buckets diff --git a/suites/squid/rgw/tier-2_rgw_3_way_multisite.yaml b/suites/squid/rgw/tier-2_rgw_3_way_multisite.yaml index 4e1751621fb..e1643b5f257 100644 --- a/suites/squid/rgw/tier-2_rgw_3_way_multisite.yaml +++ b/suites/squid/rgw/tier-2_rgw_3_way_multisite.yaml @@ -445,6 +445,16 @@ tests: module: sanity_rgw_multisite.py name: create non-tenanted user polarion-id: CEPH-83575199 + - test: + clusters: + ceph-pri: + config: + script-name: test_bucket_lifecycle_object_expiration_transition.py + config-file-name: test_lc_cloud_transition_restore_object.yaml + desc: test s3 object restore and download, and restore attrs + module: sanity_rgw_multisite.py + name: test s3 object restore and download, and restore attrs + polarion-id: CEPH-83591622 #CEPH-83591672 #CEPH-83591621 - test: clusters: diff --git a/suites/squid/rgw/tier-2_rgw_regression_extended.yaml b/suites/squid/rgw/tier-2_rgw_regression_extended.yaml index 75f983755ce..48f9d68ce8f 100644 --- a/suites/squid/rgw/tier-2_rgw_regression_extended.yaml +++ b/suites/squid/rgw/tier-2_rgw_regression_extended.yaml @@ -312,3 +312,12 @@ tests: config: script-name: ../aws/test_acl.py config-file-name: ../../aws/configs/test_public_read_write_acl.yaml + + - test: + name: Test Inditex hotfix bz, CORS presigned put url + desc: CORS ACL's prevents access to buckets with presigned PUT URI's with ACL private header + polarion-id: CEPH-83604475 + module: sanity_rgw.py + config: + script-name: ../curl/test_cors_using_curl.py + config-file-name: ../../curl/configs/test_cors_presigned_put_url_using_curl.yaml diff --git a/suites/squid/rgw/tier-2_rgw_regression_test.yaml b/suites/squid/rgw/tier-2_rgw_regression_test.yaml index 8be9610019d..9d5622050c6 100644 --- a/suites/squid/rgw/tier-2_rgw_regression_test.yaml +++ b/suites/squid/rgw/tier-2_rgw_regression_test.yaml @@ -633,3 +633,14 @@ tests: run-on-rgw: true script-name: ../curl/test_cors_using_curl.py config-file-name: ../../curl/configs/test_crlf_injection_curl.yaml + + - test: + name: LC process with versioning suspended bucket + desc: LC process with versioning suspended bucket + polarion-id: CEPH-83574809 + module: sanity_rgw.py + comments: Known issue BZ-2319199(targeted to 8.1) BZ-2336609(targeted to 8.0z2) + config: + test-version: v2 + script-name: test_bucket_lifecycle_object_expiration_transition.py + config-file-name: 
test_lc_process_with_versioning_suspended.yaml diff --git a/tests/cephfs/cephfs_journal_tool/cephfs_journal_tool_event_mode.py b/tests/cephfs/cephfs_journal_tool/cephfs_journal_tool_event_mode.py index 45a444df20d..21ae79d8351 100644 --- a/tests/cephfs/cephfs_journal_tool/cephfs_journal_tool_event_mode.py +++ b/tests/cephfs/cephfs_journal_tool/cephfs_journal_tool_event_mode.py @@ -1,5 +1,6 @@ import random import string +import time import traceback from tests.cephfs.cephfs_utilsV1 import FsUtils @@ -65,8 +66,26 @@ def run(ceph_cluster, **kw): [client1], kernel_mounting_dir_1, ",".join(mon_node_ips), - extra_params=f",fs={fs_name}", + extra_params=f", fs={fs_name}", ) + # down all the osd nodes + mdss = ceph_cluster.get_ceph_objects("mds") + mds_nodes_names = [] + for mds in mdss: + out, ec = mds.exec_command( + sudo=True, cmd="systemctl list-units | grep -o 'ceph-.*mds.*\\.service'" + ) + mds_nodes_names.append((mds, out.strip())) + log.info(f"NODES_MDS_info :{mds_nodes_names}") + for mds in mds_nodes_names: + mds[0].exec_command(sudo=True, cmd=f"systemctl stop {mds[1]}") + time.sleep(10) + log.info(mds_nodes_names) + log.info("All the mds nodes are down") + health_output = client1.exec_command(sudo=True, cmd="ceph -s") + log.info(health_output[0]) + if "offline" not in health_output[0]: + return 1 log.info("Testing Event Get") client1.exec_command( sudo=True, @@ -94,6 +113,7 @@ def run(ceph_cluster, **kw): out1, ec1 = client1.exec_command( sudo=True, cmd=f"cephfs-journal-tool --rank {fs_name}:0 event get json --path output.json", + timeout=600, ) if "Wrote" not in ec1: log.error(out1) @@ -202,6 +222,13 @@ def run(ceph_cluster, **kw): log.error(traceback.format_exc()) return 1 finally: + # start mds + for mds in mds_nodes_names: + mds_node = mds[0] + mds_name = mds[1] + # reset failed counter + mds_node.exec_command(sudo=True, cmd=f"systemctl reset-failed {mds_name}") + mds_node.exec_command(sudo=True, cmd=f"systemctl restart {mds_name}") # Cleanup fs_util.client_clean_up( "umount", fuse_clients=[clients[0]], mounting_dir=fuse_mounting_dir_1 diff --git a/tests/cephfs/cephfs_journal_tool/cephfs_journal_tool_header_mode.py b/tests/cephfs/cephfs_journal_tool/cephfs_journal_tool_header_mode.py index 375bc308cc7..4e66d875fe3 100644 --- a/tests/cephfs/cephfs_journal_tool/cephfs_journal_tool_header_mode.py +++ b/tests/cephfs/cephfs_journal_tool/cephfs_journal_tool_header_mode.py @@ -69,21 +69,33 @@ def run(ceph_cluster, **kw): ",".join(mon_node_ips), extra_params=f",fs={fs_name}", ) + # failing the file system + # down all the osd nodes + health_output = client1.exec_command(sudo=True, cmd="ceph -s") + log.info(health_output[0]) + mdss = ceph_cluster.get_ceph_objects("mds") + mds_nodes_names = [] + for mds in mdss: + out, ec = mds.exec_command( + sudo=True, cmd="systemctl list-units | grep -o 'ceph-.*mds.*\\.service'" + ) + mds_nodes_names.append((mds, out.strip())) + log.info(f"NODES_MDS_info :{mds_nodes_names}") + for mds in mds_nodes_names: + mds[0].exec_command(sudo=True, cmd=f"systemctl stop {mds[1]}") + log.info("All the mds nodes are down") + log.info(mds_nodes_names) + time.sleep(10) + health_output = client1.exec_command(sudo=True, cmd="ceph -s") + log.info(health_output[0]) + if "offline" not in health_output[0]: + return 1 # test cephfs-journal-tool header get header_out1, ec1 = client1.exec_command( sudo=True, cmd=f"cephfs-journal-tool --rank {fs_name}:0 header get", check_ec=False, ) - mdss = ceph_cluster.get_ceph_objects("mds") - mds_list = [] - for mds in mdss: - daemon_name = 
fs_util.deamon_op(mds, "mds", "stop") - fs_util.check_deamon_status(mds, "mds", "inactive") - mds_list.append((mds, daemon_name)) - log.info("All the mds nodes are down") - log.info(mds_list) - time.sleep(10) health_output = client1.exec_command(sudo=True, cmd="ceph -s") log.info(health_output[0]) if "offline" not in health_output[0]: @@ -198,10 +210,12 @@ def run(ceph_cluster, **kw): finally: # Cleanup # start mds - for mds in mds_list: - fs_util.deamon_op(mds[0], "mds", "reset-failed", service_name=mds[1]) - fs_util.deamon_op(mds[0], "mds", "start", service_name=mds[1]) - fs_util.check_deamon_status(mds[0], "mds", "active") + for mds in mds_nodes_names: + mds_node = mds[0] + mds_name = mds[1] + # reset failed counter + mds_node.exec_command(sudo=True, cmd=f"systemctl reset-failed {mds_name}") + mds_node.exec_command(sudo=True, cmd=f"systemctl restart {mds_name}") time.sleep(10) health_output = client1.exec_command(sudo=True, cmd="ceph -s") log.info(health_output[0]) diff --git a/tests/cephfs/cephfs_journal_tool/cephfs_journal_tool_journal_mode.py b/tests/cephfs/cephfs_journal_tool/cephfs_journal_tool_journal_mode.py index c5e333d5927..a48cdb1fd6c 100644 --- a/tests/cephfs/cephfs_journal_tool/cephfs_journal_tool_journal_mode.py +++ b/tests/cephfs/cephfs_journal_tool/cephfs_journal_tool_journal_mode.py @@ -66,21 +66,23 @@ def run(ceph_cluster, **kw): ",".join(mon_node_ips), extra_params=f",fs={fs_name}", ) - # down all the osd nodes + # getting the mds name mdss = ceph_cluster.get_ceph_objects("mds") - mds_list = [] + mds_nodes_names = [] for mds in mdss: - daemon_name = fs_util.deamon_op(mds, "mds", "stop") - fs_util.check_deamon_status(mds, "mds", "inactive") - mds_list.append((mds, daemon_name)) - log.info("All the mds nodes are down") - log.info(mds_list) + out, ec = mds.exec_command( + sudo=True, cmd="systemctl list-units | grep -o 'ceph-.*mds.*\\.service'" + ) + mds_nodes_names.append((mds, out.strip())) + log.info(f"NODES_MDS_info :{mds_nodes_names}") + for mds in mds_nodes_names: + mds[0].exec_command(sudo=True, cmd=f"systemctl stop {mds[1]}") time.sleep(10) + log.info("All the mds nodes are down") health_output = client1.exec_command(sudo=True, cmd="ceph -s") log.info(health_output[0]) if "offline" not in health_output[0]: return 1 - # run "cephfs-journal-tool --rank [fs_name]:0 journal inspect" command inspect_out, ec_1 = client1.exec_command( sudo=True, cmd=f"cephfs-journal-tool --rank {fs_name}:0 journal inspect" @@ -163,11 +165,12 @@ def run(ceph_cluster, **kw): return 1 finally: # start mds - for mds in mds_list: - fs_util.deamon_op(mds[0], "mds", "reset-failed", service_name=mds[1]) - daemon_name = fs_util.deamon_op(mds[0], "mds", "start", service_name=mds[1]) - time.sleep(10) - fs_util.check_deamon_status(mds[0], "active", daemon_name) + for mds in mds_nodes_names: + mds_node = mds[0] + mds_name = mds[1] + # reset failed counter + mds_node.exec_command(sudo=True, cmd=f"systemctl reset-failed {mds_name}") + mds_node.exec_command(sudo=True, cmd=f"systemctl restart {mds_name}") time.sleep(10) health_output = client1.exec_command(sudo=True, cmd="ceph -s") diff --git a/tests/cephfs/cephfs_top/cephfs_top_dump.py b/tests/cephfs/cephfs_top/cephfs_top_dump.py index df502f44c3d..2197000693f 100644 --- a/tests/cephfs/cephfs_top/cephfs_top_dump.py +++ b/tests/cephfs/cephfs_top/cephfs_top_dump.py @@ -291,6 +291,8 @@ def run(ceph_cluster, **kw): log.error(traceback.format_exc()) return 1 finally: + log.info("Disable mgr stats") + client1.exec_command(sudo=True, cmd="ceph mgr module 
disable stats") fs_util.client_clean_up( "umount", fuse_clients=[clients[0]], mounting_dir=fuse_mounting_dir_1 ) diff --git a/tests/cephfs/cephfs_top/cephfs_top_negative_tests.py b/tests/cephfs/cephfs_top/cephfs_top_negative_tests.py index 87cb74c50a2..e32fbaff6c0 100644 --- a/tests/cephfs/cephfs_top/cephfs_top_negative_tests.py +++ b/tests/cephfs/cephfs_top/cephfs_top_negative_tests.py @@ -561,6 +561,8 @@ def run(ceph_cluster, **kw): return 1 finally: crash_status_after = fs_util_v1.get_crash_ls_new(client) + log.info("Disable mgr stats") + client.exec_command(sudo=True, cmd="ceph mgr module disable stats") log.info(f"Crash status after Test: {crash_status_before}") if crash_status_before != crash_status_after: assert ( diff --git a/tests/cephfs/cephfs_top/validate_fs_top_stats.py b/tests/cephfs/cephfs_top/validate_fs_top_stats.py index fffb12a1b02..03cfbd3afa1 100644 --- a/tests/cephfs/cephfs_top/validate_fs_top_stats.py +++ b/tests/cephfs/cephfs_top/validate_fs_top_stats.py @@ -365,6 +365,8 @@ def run(ceph_cluster, **kw): ) fs_util.remove_fs(client1, fs_name) log.info("Successfully unmounted the clients") + log.info("Disable mgr stats") + client1.exec_command(sudo=True, cmd="ceph mgr module disable stats") client1.exec_command( sudo=True, cmd="mv /etc/fstab.backup /etc/fstab", check_ec=False ) diff --git a/tests/cephfs/cephfs_utilsV1.py b/tests/cephfs/cephfs_utilsV1.py index a62a097e602..fa0471834c8 100644 --- a/tests/cephfs/cephfs_utilsV1.py +++ b/tests/cephfs/cephfs_utilsV1.py @@ -3942,14 +3942,19 @@ def get_mds_metrics(self, client, rank=0, mounted_dir="", fs_name="cephfs"): sudo=True, cmd=f"ceph fs status {fs_name} -f json | jq '.mdsmap[] | select(.rank == {rank}) | .name'", ) + log.info(f"Executing MDS name with rank command: {ranked_mds}") ranked_mds = ranked_mds.replace('"', "").replace("\n", "") client_id_cmd = ( f"ceph tell mds.{ranked_mds} session ls | jq '.[] | select(.client_metadata.mount_point" f' != null and (.client_metadata.mount_point | contains("{mounted_dir}"))) | .id\'' ) + log.info(f"Executing Client ID Command : {client_id_cmd}") client_id, _ = client.exec_command(sudo=True, cmd=client_id_cmd) client_id = client_id.replace('"', "").replace("\n", "") - log.info(f"Client ID : {client_id} for Mounted Directory : {mounted_dir}") + if client_id == "": + log.error(f"Client not found for Mounted Directory : {mounted_dir}") + return 1 + log.info(f"Client ID :[{client_id}] for Mounted Directory : [{mounted_dir}]") cmd = f""" ceph tell mds.{ranked_mds} counter dump 2>/dev/null | \ jq -r '. 
| to_entries | map(select(.key | match("mds_client_metrics"))) | \ .[].value[] | select(.labels.client != null and (.labels.client | contains("{client_id}")) @@ -3959,6 +3964,9 @@ def get_mds_metrics(self, client, rank=0, mounted_dir="", fs_name="cephfs"): log.info( f"Metrics for MDS : {ranked_mds} Mounted Directory: {mounted_dir} and Client : {client_id} is {metrics_out}" ) + if metrics_out == "": + log.error(f"Metrics not found for MDS : {ranked_mds}") + return 1 metrics_out = json.loads(str(metrics_out)) return metrics_out diff --git a/tests/cephfs/cephfs_vol_management/cephfs_vol_mgmt_subvol_group_scenarios.py b/tests/cephfs/cephfs_vol_management/cephfs_vol_mgmt_subvolgroup_scenarios.py similarity index 100% rename from tests/cephfs/cephfs_vol_management/cephfs_vol_mgmt_subvol_group_scenarios.py rename to tests/cephfs/cephfs_vol_management/cephfs_vol_mgmt_subvolgroup_scenarios.py diff --git a/tests/nfs/test_earmark_feature.py b/tests/nfs/test_earmark_feature.py index abb45302405..483c91cd29d 100644 --- a/tests/nfs/test_earmark_feature.py +++ b/tests/nfs/test_earmark_feature.py @@ -1,7 +1,8 @@ from time import sleep from cli.ceph.ceph import Ceph -from tests.cephfs.cephfs_utilsV1 import FsUtils +from cli.ceph.fs.sub_volume import SubVolume +from cli.ceph.fs.sub_volume_group import SubVolumeGroup from tests.nfs.nfs_operations import check_nfs_daemons_removed from tests.nfs.nfs_test_multiple_filesystem_exports import create_nfs_export from utility.log import Log @@ -14,7 +15,6 @@ def run(ceph_cluster, **kw): Args: **kw: Key/value pairs of configuration information to be used in the test. """ - fs_util = FsUtils(ceph_cluster) config = kw.get("config") nfs_nodes = ceph_cluster.get_nodes("nfs") clients = ceph_cluster.get_nodes("client") @@ -27,32 +27,24 @@ def run(ceph_cluster, **kw): subvolume_group = "ganeshagroup" subvolume_name = "subvolume" earmark = config.get("earmark") + subvolume_cli = SubVolume(clients[0], "ceph fs") - fs_util.create_subvolumegroup( - client=clients[0], vol_name=fs_name, group_name=subvolume_group - ) - fs_util.create_subvolume( - client=clients[0], - vol_name=fs_name, - subvol_name=subvolume_name, - validate=True, - group_name=subvolume_group, - ) - fs_util.set_subvolume_earmark( - client=clients[0], - vol_name=fs_name, - subvol_name=subvolume_name, - group_name=subvolume_group, - earmark=earmark, + SubVolumeGroup(clients[0], "ceph fs").create(volume=fs_name, group=subvolume_group) + + subvolume_cli.create( + volume=fs_name, subvolume=subvolume_name, group_name=subvolume_group ) - subvolume_earmark = fs_util.get_subvolume_earmark( - client=clients[0], - vol_name=fs_name, - subvol_name=subvolume_name, + subvolume_cli.set_subvolume_earmark( + volume=fs_name, + subvolume=subvolume_name, + earmark=earmark, group_name=subvolume_group, ) + subvolume_earmark = subvolume_cli.get_subvolume_earmark( + volume=fs_name, subvolume=subvolume_name, group_name=subvolume_group + ) if operation == "verify_earmark": if earmark not in subvolume_earmark: log.error(f'earmark "{earmark}" not found on subvolume {subvolume_name}') @@ -64,21 +56,15 @@ def run(ceph_cluster, **kw): if operation == "rename_earmark": earmark2 = "nfs" - fs_util.remove_subvolume_earmark( - client=clients[0], - vol_name=fs_name, - subvol_name=subvolume_name, - group_name=subvolume_group, + subvolume_cli.remove_subvolume_earmark( + volume=fs_name, subvolume=subvolume_name, group_name=subvolume_group ) - - fs_util.set_subvolume_earmark( - client=clients[0], - vol_name=fs_name, - subvol_name=subvolume_name, + 
subvolume_cli.set_subvolume_earmark( + volume=fs_name, + subvolume=subvolume_name, group_name=subvolume_group, earmark=earmark2, ) - try: # Setup nfs cluster Ceph(clients[0]).nfs.cluster.create( @@ -88,30 +74,23 @@ def run(ceph_cluster, **kw): if operation == "override_earmark": earmark2 = "smb" - fs_util.set_subvolume_earmark( - client=clients[0], - vol_name=fs_name, - subvol_name=subvolume_name, + subvolume_cli.set_subvolume_earmark( + volume=fs_name, + subvolume=subvolume_name, group_name=subvolume_group, earmark=earmark2, ) # re-verifying the earmark - subvolume_earmark = fs_util.get_subvolume_earmark( - client=clients[0], - vol_name=fs_name, - subvol_name=subvolume_name, - group_name=subvolume_group, + subvolume_earmark = subvolume_cli.get_subvolume_earmark( + volume=fs_name, subvolume=subvolume_name, group_name=subvolume_group ) log.info(f"subvolume earmark is {subvolume_earmark}") - sub_volume_path = fs_util.get_subvolume_info( - client=clients[0], - vol_name=fs_name, - subvol_name=subvolume_name, - group_name=subvolume_group, - ).get("path") + sub_volume_path = subvolume_cli.getpath( + volume=fs_name, subvolume=subvolume_name, group_name=subvolume_group + ) try: create_nfs_export( @@ -143,13 +122,10 @@ def run(ceph_cluster, **kw): return 1 finally: log.info("Cleaning up in progress") - fs_util.remove_subvolume( - client=clients[0], - vol_name=fs_name, - subvol_name=subvolume_name, - validate=True, - group_name=subvolume_group, + subvolume_cli.rm( + volume=fs_name, subvolume=subvolume_name, group=subvolume_group ) + log.info(f"Removed the subvolume {subvolume_name} from group {subvolume_group}") Ceph(clients[0]).nfs.cluster.delete(nfs_name) sleep(30) diff --git a/tests/parallel/test_parallel.py b/tests/parallel/test_parallel.py index 2c6e7cd13ab..a3106139268 100644 --- a/tests/parallel/test_parallel.py +++ b/tests/parallel/test_parallel.py @@ -48,14 +48,12 @@ def run(**kwargs): parallel_tests = kwargs["parallel"] parallel_tcs = manager.list() max_time = kwargs.get("config", {}).get("max_time", None) - wait_till_complete = kwargs.get("config", {}).get("wait_till_complete", True) cancel_pending = kwargs.get("config", {}).get("cancel_pending", False) parallel_log.info(kwargs) with parallel( thread_pool=False, timeout=max_time, - shutdown_wait=wait_till_complete, shutdown_cancel_pending=cancel_pending, ) as p: for test in parallel_tests: diff --git a/utility/utils.py b/utility/utils.py index ae3c2b991c8..141de59f7c7 100644 --- a/utility/utils.py +++ b/utility/utils.py @@ -574,7 +574,11 @@ def set_config_param(node): rgw_process_name = rgw_process[0].split()[0] # add the configuration/s to be set on service - configs = ["rgw_max_objs_per_shard 5", "rgw_lc_debug_interval 30"] + configs = [ + "rgw_max_objs_per_shard 5", + "rgw_lc_debug_interval 30", + "rgw_restore_debug_interval 30", + ] for config_cmd in configs: node.exec_command(cmd=f"ceph config set client.{rgw_process_name} {config_cmd}")
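Note: the cephfs_journal_tool_* tests above replace the FsUtils daemon helpers with direct systemctl calls (list-units | grep to find the unit, then stop, and reset-failed plus restart in the cleanup path). The following is a minimal sketch of that sequence pulled out as standalone helpers; it assumes the same exec_command(sudo=..., cmd=...) node objects used in these tests, and the names stop_all_mds/restart_all_mds are illustrative only, not part of the repository.

from time import sleep


def stop_all_mds(mds_nodes):
    # Illustrative helper: mirrors the systemctl-based MDS shutdown used in the
    # journal-tool tests above. Returns (node, unit) pairs for later restart.
    stopped = []
    for node in mds_nodes:
        out, _ = node.exec_command(
            sudo=True, cmd="systemctl list-units | grep -o 'ceph-.*mds.*\\.service'"
        )
        unit = out.strip()
        node.exec_command(sudo=True, cmd=f"systemctl stop {unit}")
        stopped.append((node, unit))
    sleep(10)  # give the cluster time to report the MDS ranks as offline
    return stopped


def restart_all_mds(stopped):
    # Clear the failed counter before restarting, as the tests do in finally.
    for node, unit in stopped:
        node.exec_command(sudo=True, cmd=f"systemctl reset-failed {unit}")
        node.exec_command(sudo=True, cmd=f"systemctl restart {unit}")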
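Note: tests/nfs/test_earmark_feature.py above now drives the earmark operations through the cli.ceph.fs.sub_volume.SubVolume wrapper instead of FsUtils. A minimal usage sketch of the set/get/remove round trip follows; it assumes a connected client node and an existing volume, subvolume group and subvolume, and the names cephfs, subvolume and ganeshagroup are placeholders taken from the test, not requirements of the API.

from cli.ceph.fs.sub_volume import SubVolume

subvol = SubVolume(client, "ceph fs")  # client: a connected CephNode-like object
subvol.set_subvolume_earmark(
    volume="cephfs", subvolume="subvolume", earmark="nfs", group_name="ganeshagroup"
)
earmark = subvol.get_subvolume_earmark(
    volume="cephfs", subvolume="subvolume", group_name="ganeshagroup"
)
assert "nfs" in earmark  # verify the earmark round trip before removing it
subvol.remove_subvolume_earmark(
    volume="cephfs", subvolume="subvolume", group_name="ganeshagroup"
)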