Common Test file for all volume operations tests
Signed-off-by: Hemanth <[email protected]>
hkadam134 committed Jan 20, 2025
1 parent 92b25ea commit 1887cac
Showing 9 changed files with 351 additions and 97 deletions.
@@ -328,12 +328,6 @@ tests:
       polarion-id: CEPH-83575032
       desc: Subvolume metadata creation, delete and modifying test
       abort-on-fail: false
-  - test:
-      name: Test fs volume deletion when mon_allow_pool_delete is false
-      module: cephfs_vol_management.fs_del_allow_pool_false.py
-      polarion-id: CEPH-83574159
-      desc: Test fs volume deletion when mon_allow_pool_delete is false
-      abort-on-fail: false
   - test:
       name: cephfs_vol_mgmt_fs_life_cycle
       module: cephfs_vol_management.cephfs_vol_mgmt_fs_life_cycle.py
@@ -331,12 +331,6 @@ tests:
       polarion-id: CEPH-83575032
       desc: Subvolume metadata creation, delete and modifying test
       abort-on-fail: false
-  - test:
-      name: Test fs volume deletion when mon_allow_pool_delete is false
-      module: cephfs_vol_management.fs_del_allow_pool_false.py
-      polarion-id: CEPH-83574159
-      desc: Test fs volume deletion when mon_allow_pool_delete is false
-      abort-on-fail: false
   - test:
       name: cephfs_vol_mgmt_fs_life_cycle
       module: cephfs_vol_management.cephfs_vol_mgmt_fs_life_cycle.py
suites/quincy/cephfs/tier-2_file-dir-lay_vol-mgmt_nfs.yaml (10 changes: 2 additions & 8 deletions)
@@ -307,12 +307,6 @@ tests:
       polarion-id: CEPH-83575032
       desc: Subvolume metadata creation, delete and modifying test
       abort-on-fail: false
-  - test:
-      name: Test fs volume deletion when mon_allow_pool_delete is false
-      module: cephfs_vol_management.fs_del_allow_pool_false.py
-      polarion-id: CEPH-83574159
-      desc: Test fs volume deletion when mon_allow_pool_delete is false
-      abort-on-fail: false
   - test:
       name: cephfs_vol_mgmt_fs_life_cycle
       module: cephfs_vol_management.cephfs_vol_mgmt_fs_life_cycle.py
@@ -477,8 +471,8 @@ tests:
       name: "nfs_multiple_export_using_single_conf"
       polarion-id: "CEPH-83575082"
   - test:
-      name: Basic info validation after volume creation and deletion
+      name: CephFS Volume Operations
       module: cephfs_vol_management.cephfs_vol_mgmt_test_volume.py
       polarion-id: CEPH-83604097
-      desc: Basic info validation after volume creation and deletion
+      desc: Tests to validate CephFS Volume Operations
       abort-on-fail: true
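Background for the entry above: cephci suite YAMLs map each test to a Python module that the framework drives through a module-level run(ceph_cluster, **kw) function returning 0 on success. The skeleton below is only an illustrative sketch of that contract; it is not the actual body of cephfs_vol_mgmt_test_volume.py, and the get_ceph_objects call reflects the usual framework helper rather than anything shown in this commit.

```python
# Hypothetical skeleton of a cephci test module. Only the run() contract
# (return 0 on pass, non-zero on fail) is assumed from the framework.
import logging
import traceback

log = logging.getLogger(__name__)


def run(ceph_cluster, **kw):
    try:
        client = ceph_cluster.get_ceph_objects("client")[0]
        log.info("Running consolidated CephFS volume operation checks")
        # ... use `client` to create a volume, validate `ceph fs ls` and
        # pool info, then remove the volume and validate the cleanup ...
        return 0
    except Exception as err:
        log.error(err)
        log.error(traceback.format_exc())
        return 1
```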
suites/reef/cephfs/tier-2_cephfs_test-volume-management.yaml (6 changes: 0 additions & 6 deletions)
@@ -328,12 +328,6 @@ tests:
       polarion-id: CEPH-83575032
       desc: Subvolume metadata creation, delete and modifying test
       abort-on-fail: false
-  - test:
-      name: Test fs volume deletion when mon_allow_pool_delete is false
-      module: cephfs_vol_management.fs_del_allow_pool_false.py
-      polarion-id: CEPH-83574159
-      desc: Test fs volume deletion when mon_allow_pool_delete is false
-      abort-on-fail: false
   - test:
       name: cephfs_vol_mgmt_fs_life_cycle
       module: cephfs_vol_management.cephfs_vol_mgmt_fs_life_cycle.py
suites/reef/cephfs/tier-2_file-dir-lay_vol-mgmt_nfs.yaml (10 changes: 2 additions & 8 deletions)
@@ -307,12 +307,6 @@ tests:
       polarion-id: CEPH-83575032
       desc: Subvolume metadata creation, delete and modifying test
       abort-on-fail: false
-  - test:
-      name: Test fs volume deletion when mon_allow_pool_delete is false
-      module: cephfs_vol_management.fs_del_allow_pool_false.py
-      polarion-id: CEPH-83574159
-      desc: Test fs volume deletion when mon_allow_pool_delete is false
-      abort-on-fail: false
   - test:
       name: cephfs_vol_mgmt_fs_life_cycle
       module: cephfs_vol_management.cephfs_vol_mgmt_fs_life_cycle.py
@@ -477,8 +471,8 @@ tests:
       name: "nfs_multiple_export_using_single_conf"
       polarion-id: "CEPH-83575082"
   - test:
-      name: Basic info validation after volume creation and deletion
+      name: CephFS Volume Operations
       module: cephfs_vol_management.cephfs_vol_mgmt_test_volume.py
       polarion-id: CEPH-83604097
-      desc: Basic info validation after volume creation and deletion
+      desc: Tests to validate CephFS Volume Operations
       abort-on-fail: true
@@ -328,12 +328,6 @@ tests:
       polarion-id: CEPH-83575032
       desc: Subvolume metadata creation, delete and modifying test
       abort-on-fail: false
-  - test:
-      name: Test fs volume deletion when mon_allow_pool_delete is false
-      module: cephfs_vol_management.fs_del_allow_pool_false.py
-      polarion-id: CEPH-83574159
-      desc: Test fs volume deletion when mon_allow_pool_delete is false
-      abort-on-fail: false
   - test:
       name: cephfs_vol_mgmt_fs_life_cycle
       module: cephfs_vol_management.cephfs_vol_mgmt_fs_life_cycle.py
suites/squid/cephfs/tier-2_file-dir-lay_vol-mgmt_nfs.yaml (10 changes: 2 additions & 8 deletions)
@@ -307,12 +307,6 @@ tests:
       polarion-id: CEPH-83575032
       desc: Subvolume metadata creation, delete and modifying test
       abort-on-fail: false
-  - test:
-      name: Test fs volume deletion when mon_allow_pool_delete is false
-      module: cephfs_vol_management.fs_del_allow_pool_false.py
-      polarion-id: CEPH-83574159
-      desc: Test fs volume deletion when mon_allow_pool_delete is false
-      abort-on-fail: false
   - test:
       name: cephfs_vol_mgmt_fs_life_cycle
       module: cephfs_vol_management.cephfs_vol_mgmt_fs_life_cycle.py
@@ -495,8 +489,8 @@ tests:
       name: "nfs_multiple_export_using_single_conf"
       polarion-id: "CEPH-83575082"
   - test:
-      name: Basic info validation after volume creation and deletion
+      name: CephFS Volume Operations
       module: cephfs_vol_management.cephfs_vol_mgmt_test_volume.py
       polarion-id: CEPH-83604097
-      desc: Basic info validation after volume creation and deletion
+      desc: Tests to validate CephFS Volume Operations
       abort-on-fail: true
tests/cephfs/cephfs_utilsV1.py (40 changes: 31 additions & 9 deletions)
@@ -1325,30 +1325,52 @@ def create_osd_pool(
         Returns:
             Returns the cmd_out and cmd_rc for the create command.
         """
-        # Additional configuration for erasure-coded pools
-        if erasure:
-            pool_cmd = f"ceph osd pool create {pool_name} {pg_num or ''} {pgp_num or ''} erasure"
-            if kwargs.get("erasure_code_profile"):
-                pool_cmd += f" {kwargs.get('erasure_code_profile')}"
-        else:
-            pool_cmd = f"ceph osd pool create {pool_name} {pg_num or ''} {pgp_num or ''} replicated"
-        log.info(f"Setting allow_ec_overwrites to true for erasure-coded pool: {pool_name}")
-        client.exec_command(
-            sudo=True, cmd=f"ceph osd pool set {pool_name} allow_ec_overwrites true"
-        )
+        # Determine pool type
+        pool_type = "erasure" if erasure else "replicated"
+        log.info(f"Creating {pool_type} OSD pool: {pool_name}")
+
+        # Build the pool creation command
+        pool_cmd = f"ceph osd pool create {pool_name} {pg_num} {pgp_num or ''} {pool_type}".strip()
+
+        # Append erasure code profile if specified
+        if erasure and kwargs.get("erasure_code_profile"):
+            pool_cmd += f" {kwargs['erasure_code_profile']}"
 
+        # Append CRUSH rule name if specified
         if kwargs.get("crush_rule_name"):
-            pool_cmd += f" {kwargs.get('crush_rule_name')}"
+            pool_cmd += f" {kwargs['crush_rule_name']}"
 
+        # Append expected number of objects if specified
         if kwargs.get("expected_num_objects"):
-            pool_cmd += f" {kwargs.get('expected_num_objects')}"
+            pool_cmd += f" {kwargs['expected_num_objects']}"
 
+        # Set autoscale mode if specified
         if erasure and kwargs.get("autoscale_mode"):
-            pool_cmd += f" --autoscale-mode={kwargs.get('autoscale_mode')}"
+            pool_cmd += f" --autoscale-mode={kwargs['autoscale_mode']}"
 
+        # Execute the pool creation command
         cmd_out, cmd_rc = client.exec_command(
             sudo=True, cmd=pool_cmd, check_ec=kwargs.get("check_ec", True)
         )
 
+        log.info(f"OSD pool {pool_name} created successfully")
+
+        # Validate pool creation
         if validate:
+            log.info(f"Validating creation of OSD pool: {pool_name}")
             out, rc = client.exec_command(
                 sudo=True, cmd="ceph osd pool ls --format json"
             )
             pool_ls = json.loads(out)
             if pool_name not in pool_ls:
+                log.error(f"Creation of OSD pool {pool_name} failed")
                 raise CommandFailed(f"Creation of OSD pool: {pool_name} failed")
 
         return cmd_out, cmd_rc
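Since six suites now depend on this helper, it may help to see the command assembly in isolation. The sketch below mirrors the string-building logic from the hunk above; build_pool_cmd is an illustrative name, not an API of cephfs_utilsV1.py. One subtlety: when pgp_num is omitted, the f-string leaves a doubled space that the trailing .strip() in the original does not collapse, so the sketch normalizes whitespace with split/join instead.

```python
# Standalone sketch mirroring the pool-command assembly above.
# build_pool_cmd is an illustrative name, not a cephfs_utilsV1.py API.
def build_pool_cmd(pool_name, pg_num, pgp_num=None, erasure=False, **kwargs):
    pool_type = "erasure" if erasure else "replicated"
    cmd = f"ceph osd pool create {pool_name} {pg_num} {pgp_num or ''} {pool_type}"
    if erasure and kwargs.get("erasure_code_profile"):
        cmd += f" {kwargs['erasure_code_profile']}"
    if kwargs.get("crush_rule_name"):
        cmd += f" {kwargs['crush_rule_name']}"
    if kwargs.get("expected_num_objects"):
        cmd += f" {kwargs['expected_num_objects']}"
    if erasure and kwargs.get("autoscale_mode"):
        cmd += f" --autoscale-mode={kwargs['autoscale_mode']}"
    # Collapse any doubled spaces left by omitted optional arguments.
    return " ".join(cmd.split())


assert (
    build_pool_cmd("cephfs_data", 64, 64)
    == "ceph osd pool create cephfs_data 64 64 replicated"
)
assert build_pool_cmd(
    "cephfs_ec", 32, erasure=True, erasure_code_profile="ec_profile"
) == "ceph osd pool create cephfs_ec 32 erasure ec_profile"
```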
@@ -1951,7 +1973,7 @@ def remove_fs(self, client, vol_name, validate=True, **kwargs):
             out, rc = client.exec_command(sudo=True, cmd="ceph fs ls --format json")
             volname_ls = json.loads(out)
             if vol_name in [i["name"] for i in volname_ls]:
-                raise CommandFailed(f"Creation of filesystem: {vol_name} failed")
+                raise CommandFailed(f"Removal of filesystem: {vol_name} failed")
 
             log.info("Validating the deletion of pools")
             outpools, rcpools = client.exec_command(
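The second hunk fixes a copy-pasted message: on a failed removal, remove_fs previously raised an error claiming a creation failure. The validation itself only checks that the volume name is absent from `ceph fs ls --format json` output. A minimal, self-contained sketch of that check (the function name and the canned JSON are illustrative):

```python
import json


def fs_removed(fs_ls_output: str, vol_name: str) -> bool:
    """True if vol_name is absent from `ceph fs ls --format json` output."""
    return vol_name not in [fs["name"] for fs in json.loads(fs_ls_output)]


# Canned output as `ceph fs ls --format json` might report it after removal.
out = '[{"name": "cephfs_b", "metadata_pool": "cephfs.b.meta", "data_pools": ["cephfs.b.data"]}]'
assert fs_removed(out, "cephfs_a")      # removed volume is gone
assert not fs_removed(out, "cephfs_b")  # remaining volume still listed
```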