diff --git a/cli/ceph/fs/fs.py b/cli/ceph/fs/fs.py
index b1b1d922782..668b43baa50 100644
--- a/cli/ceph/fs/fs.py
+++ b/cli/ceph/fs/fs.py
@@ -1,7 +1,7 @@
 from cli import Cli
 
-from .sub_volume import SubVolume
 from .sub_volume_group import SubVolumeGroup
+from .subvolume.sub_volume import SubVolume
 from .volume import Volume
diff --git a/cli/ceph/fs/subvolume/__init__.py b/cli/ceph/fs/subvolume/__init__.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/cli/ceph/fs/subvolume/earmark.py b/cli/ceph/fs/subvolume/earmark.py
new file mode 100644
index 00000000000..6553f4b726a
--- /dev/null
+++ b/cli/ceph/fs/subvolume/earmark.py
@@ -0,0 +1,50 @@
+from cli import Cli
+from cli.utilities.utils import build_cmd_from_args
+
+
+class Earmark(Cli):
+    """This module provides CLI interface for FS subvolume earmark operations"""
+
+    def __init__(self, nodes, base_cmd):
+        super(Earmark, self).__init__(nodes)
+        self.base_cmd = f"{base_cmd} subvolume earmark"
+
+    def set(self, volume, subvolume_name, earmark, **kwargs):
+        """
+        Sets an earmark on the subvolume
+        Args:
+            volume (str): Name of the volume where the subvolume is present
+            subvolume_name (str): subvolume name
+            earmark (str): earmark name
+        """
+        cmd = f"{self.base_cmd} set {volume} {subvolume_name} --earmark {earmark}{build_cmd_from_args(**kwargs)}"
+        out = self.execute(sudo=True, cmd=cmd)
+        if isinstance(out, tuple):
+            return out[0].strip()
+        return out
+
+    def get(self, volume, subvolume_name, **kwargs):
+        """
+        Gets the earmark of the subvolume, if one is set
+        Args:
+            volume (str): Name of the volume where the subvolume is present
+            subvolume_name (str): subvolume name
+        """
+        cmd = f"{self.base_cmd} get {volume} {subvolume_name}{build_cmd_from_args(**kwargs)}"
+        out = self.execute(sudo=True, cmd=cmd)
+        if isinstance(out, tuple):
+            return out[0].strip()
+        return out
+
+    def remove(self, volume, subvolume_name, **kwargs):
+        """
+        Removes the earmark from the subvolume
+        Args:
+            volume (str): Name of the volume where the subvolume is present
+            subvolume_name (str): subvolume name
+        """
+        cmd = f"{self.base_cmd} rm {volume} {subvolume_name}{build_cmd_from_args(**kwargs)}"
+        out = self.execute(sudo=True, cmd=cmd)
+        if isinstance(out, tuple):
+            return out[0].strip()
+        return out
diff --git a/cli/ceph/fs/sub_volume.py b/cli/ceph/fs/subvolume/sub_volume.py
similarity index 97%
rename from cli/ceph/fs/sub_volume.py
rename to cli/ceph/fs/subvolume/sub_volume.py
index c2cb906ca41..4d0c5174f19 100644
--- a/cli/ceph/fs/sub_volume.py
+++ b/cli/ceph/fs/subvolume/sub_volume.py
@@ -1,6 +1,8 @@
 from cli import Cli
 from cli.utilities.utils import build_cmd_from_args
 
+from .earmark import Earmark
+
 
 class SubVolume(Cli):
     """This module provides CLI interface for FS subvolume related operations"""
@@ -8,6 +10,7 @@ class SubVolume(Cli):
     def __init__(self, nodes, base_cmd):
         super(SubVolume, self).__init__(nodes)
         self.base_cmd = f"{base_cmd} subvolume"
+        self.earmark = Earmark(nodes, base_cmd)
 
     def create(self, volume, subvolume, **kwargs):
         """
diff --git a/suites/squid/nfs/tier1-nfs-ganesha-v4-2.yaml b/suites/squid/nfs/tier1-nfs-ganesha-v4-2.yaml
index 3a9fcd6b498..791bfb0df23 100644
--- a/suites/squid/nfs/tier1-nfs-ganesha-v4-2.yaml
+++ b/suites/squid/nfs/tier1-nfs-ganesha-v4-2.yaml
@@ -247,3 +247,48 @@ tests:
      config:
        nfs_version: 4.2
        clients: 1
+
+  - test:
+      name: Delete the old earmark, set the "nfs" earmark and create an NFS export out of it
+      desc: Delete the old earmark, set the "nfs" earmark and create an NFS export out of it
+      module: test_earmark_feature.py
+      polarion-id: CEPH-83604500
+      config:
+        cephfs_volume: cephfs
+        subvolume_group: nfsgroup
+        earmark: smb
+        operation: rename_earmark
+
+  - test:
+      name: Create a subvolume, set an earmark on it and verify it
+      desc: Create a subvolume, set an earmark on it and verify it
+      module: test_earmark_feature.py
+      polarion-id: CEPH-83604497
+      config:
+        cephfs_volume: cephfs
+        subvolume_group: nfsgroup
+        earmark: nfs
+        operation: verify_earmark
+
+  - test:
+      name: Override the earmark and try to create an NFS export. It should fail
+      desc: Override the earmark and try to create an NFS export. It should fail
+      module: test_earmark_feature.py
+      polarion-id: CEPH-83604499
+      config:
+        cephfs_volume: cephfs
+        subvolume_group: nfsgroup
+        earmark: nfs
+        operation: override_earmark
+
+  - test:
+      name: Set the earmark to "smb" and try to use the same subvolume for an NFS export
+      desc: Set the earmark to "smb" and try to use the same subvolume for an NFS export
+      module: test_earmark_feature.py
+      polarion-id: CEPH-83604498
+      config:
+        cephfs_volume: cephfs
+        subvolume_group: nfsgroup
+        nfs_version: 4.2
+        earmark: smb
+        operation: wrong_earmark
diff --git a/tests/nfs/test_earmark_feature.py b/tests/nfs/test_earmark_feature.py
new file mode 100644
index 00000000000..425c2450324
--- /dev/null
+++ b/tests/nfs/test_earmark_feature.py
@@ -0,0 +1,126 @@
+from time import sleep
+
+from cli.ceph.ceph import Ceph
+from tests.nfs.nfs_operations import check_nfs_daemons_removed
+from tests.nfs.nfs_test_multiple_filesystem_exports import create_nfs_export
+from utility.log import Log
+
+log = Log(__name__)
+
+
+def run(ceph_cluster, **kw):
+    """Verify the CephFS subvolume earmark feature with NFS exports
+    Args:
+        **kw: Key/value pairs of configuration information to be used in the test.
+    """
+    config = kw.get("config")
+    nfs_nodes = ceph_cluster.get_nodes("nfs")
+    clients = ceph_cluster.get_nodes("client")
+    nfs_node = nfs_nodes[0]
+    operation = config.get("operation")
+    fs_name = config.get("cephfs_volume", "ceph_fs")
+    nfs_name = "cephfs-nfs"
+    nfs_export = "/export"
+    nfs_server_name = nfs_node.hostname
+    subvolume_group = config.get("subvolume_group", "ganeshagroup")
+    subvolume_name = "subvolume"
+    earmark = config.get("earmark")
+    ceph_fs_obj = Ceph(clients[0]).fs
+
+    ceph_fs_obj.sub_volume_group.create(volume=fs_name, group=subvolume_group)
+
+    ceph_fs_obj.sub_volume.create(
+        volume=fs_name, subvolume=subvolume_name, group_name=subvolume_group
+    )
+
+    ceph_fs_obj.sub_volume.earmark.set(
+        volume=fs_name,
+        subvolume_name=subvolume_name,
+        earmark=earmark,
+        group_name=subvolume_group,
+    )
+
+    subvolume_earmark = ceph_fs_obj.sub_volume.earmark.get(
+        volume=fs_name, subvolume_name=subvolume_name, group_name=subvolume_group
+    )
+
+    if operation == "verify_earmark":
+        if earmark not in subvolume_earmark:
+            log.error(f'earmark "{earmark}" not found on subvolume {subvolume_name}')
+            return 1
+
+        log.info(f'earmark "{earmark}" found on subvolume {subvolume_name}')
+        return 0
+
+    if operation == "rename_earmark":
+        earmark2 = "nfs"
+        ceph_fs_obj.sub_volume.earmark.remove(
+            volume=fs_name, subvolume_name=subvolume_name, group_name=subvolume_group
+        )
+        ceph_fs_obj.sub_volume.earmark.set(
+            volume=fs_name,
+            subvolume_name=subvolume_name,
+            group_name=subvolume_group,
+            earmark=earmark2,
+        )
+
+    try:
+        # Set up the nfs cluster
+        Ceph(clients[0]).nfs.cluster.create(
+            name=nfs_name, nfs_server=nfs_server_name, ha=False, vip=None
+        )
+        sleep(3)
+
+        if operation == "override_earmark":
+            earmark2 = "smb"
+            ceph_fs_obj.sub_volume.earmark.set(
+                volume=fs_name,
+                subvolume_name=subvolume_name,
+                group_name=subvolume_group,
+                earmark=earmark2,
+            )
+
+        # re-verify the earmark before attempting the export
+        subvolume_earmark = ceph_fs_obj.sub_volume.earmark.get(
+            volume=fs_name, subvolume_name=subvolume_name, group_name=subvolume_group
+        )
+        log.info(f"subvolume earmark is {subvolume_earmark}")
+
+        sub_volume_path = ceph_fs_obj.sub_volume.getpath(
+            volume=fs_name, subvolume=subvolume_name, group_name=subvolume_group
+        )
+
+        create_nfs_export(
+            clients[0], fs_name, nfs_name, nfs_export, sub_volume_path, ""
+        )
+        log.info(
+            f"nfs export {nfs_export} has been created for subvolume path {sub_volume_path}"
+        )
+
+        if operation in ["override_earmark", "wrong_earmark"]:
+            # Export creation must fail when the subvolume is earmarked for smb
+            log.error("nfs export creation succeeded but was expected to fail")
+            return 1
+
+        Ceph(clients[0]).nfs.export.delete(nfs_name, nfs_export)
+        log.info(
+            f"nfs export {nfs_export} has been deleted for subvolume path {sub_volume_path}"
+        )
+        return 0
+
+    except Exception as e:
+        if "earmark has already been set by smb" in str(e) and operation in [
+            "override_earmark",
+            "wrong_earmark",
+        ]:
+            log.info(f"expected failure, earmark has already been set by smb: {e}")
+            return 0
+        log.error(
+            f"unexpected failure during nfs cluster or export operations on {nfs_name}: {e}"
+        )
+        return 1
+    finally:
+        log.info("Cleaning up in progress")
+        ceph_fs_obj.sub_volume.rm(
+            volume=fs_name, subvolume=subvolume_name, group=subvolume_group
+        )
+        log.info(f"Removed the subvolume {subvolume_name} from group {subvolume_group}")
+        Ceph(clients[0]).nfs.cluster.delete(nfs_name)
+        sleep(30)
+        check_nfs_daemons_removed(clients[0])
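
For reviewers, a minimal usage sketch of the earmark handle this change adds to SubVolume. The helper name, the client object and the default volume/subvolume/group names below are illustrative placeholders, not part of the change; extra kwargs such as group_name are forwarded to the ceph CLI by build_cmd_from_args.

from cli.ceph.ceph import Ceph


def earmark_roundtrip(client, volume="cephfs", subvolume="subvolume", group="nfsgroup"):
    """Set, read back and remove an earmark on an existing subvolume."""
    fs = Ceph(client).fs

    # runs "ceph fs subvolume earmark set <vol> <subvol> --earmark nfs ..." via the new wrapper
    fs.sub_volume.earmark.set(
        volume=volume, subvolume_name=subvolume, earmark="nfs", group_name=group
    )

    # get() returns the stripped stdout of "ceph fs subvolume earmark get ..."
    current = fs.sub_volume.earmark.get(
        volume=volume, subvolume_name=subvolume, group_name=group
    )
    assert "nfs" in current, f"unexpected earmark: {current}"

    # remove() maps to "ceph fs subvolume earmark rm ..."
    fs.sub_volume.earmark.remove(
        volume=volume, subvolume_name=subvolume, group_name=group
    )
    return current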