Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Earmark feature tests #4368

Merged
merged 1 commit into from
Jan 28, 2025
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion cli/ceph/fs/fs.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
from cli import Cli

from .sub_volume import SubVolume
from .sub_volume_group import SubVolumeGroup
from .subvolume.sub_volume import SubVolume
from .volume import Volume


Expand Down
Empty file.
50 changes: 50 additions & 0 deletions cli/ceph/fs/subvolume/earmark.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,50 @@
from cli import Cli
from cli.utilities.utils import build_cmd_from_args


class Earmark(Cli):
    """CLI interface for `ceph fs subvolume earmark` operations.

    An earmark tags a CephFS subvolume for a specific consumer
    (e.g. "nfs" or "smb") so other services can detect the claim.
    """

    def __init__(self, nodes, base_cmd):
        """
        Args:
            nodes: node(s) on which the CLI commands are executed
            base_cmd (str): parent command prefix (e.g. "ceph fs")
        """
        super(Earmark, self).__init__(nodes)
        self.base_cmd = f"{base_cmd} subvolume earmark"

    def _run(self, cmd):
        """Execute *cmd* with sudo and normalize the result.

        execute() may return either raw output or an (out, err) tuple;
        in the tuple case return the stripped stdout string.
        """
        out = self.execute(sudo=True, cmd=cmd)
        if isinstance(out, tuple):
            return out[0].strip()
        return out

    def set(self, volume, subvolume_name, earmark, **kwargs):
        """
        Set an earmark on the subvolume.

        Args:
            volume (str): name of the volume where the subvolume is present
            subvolume_name (str): subvolume name
            earmark (str): earmark to set (e.g. "nfs" or "smb")
            **kwargs: extra CLI flags (e.g. group_name) rendered by
                build_cmd_from_args

        Returns:
            str: command output
        """
        cmd = (
            f"{self.base_cmd} set {volume} {subvolume_name} "
            f"--earmark {earmark}{build_cmd_from_args(**kwargs)}"
        )
        return self._run(cmd)

    def get(self, volume, subvolume_name, **kwargs):
        """
        Get the earmark of the subvolume, if one is present.

        Args:
            volume (str): name of the volume where the subvolume is present
            subvolume_name (str): subvolume name
            **kwargs: extra CLI flags (e.g. group_name) rendered by
                build_cmd_from_args

        Returns:
            str: command output (the current earmark)
        """
        cmd = f"{self.base_cmd} get {volume} {subvolume_name}{build_cmd_from_args(**kwargs)}"
        return self._run(cmd)

    def remove(self, volume, subvolume_name, **kwargs):
        """
        Remove the earmark from the subvolume.

        Args:
            volume (str): name of the volume where the subvolume is present
            subvolume_name (str): subvolume name
            **kwargs: extra CLI flags (e.g. group_name) rendered by
                build_cmd_from_args

        Returns:
            str: command output
        """
        cmd = f"{self.base_cmd} rm {volume} {subvolume_name}{build_cmd_from_args(**kwargs)}"
        return self._run(cmd)
Original file line number Diff line number Diff line change
@@ -1,13 +1,16 @@
from cli import Cli
from cli.utilities.utils import build_cmd_from_args

from .earmark import Earmark


class SubVolume(Cli):
"""This module provides CLI interface for FS subvolume related operations"""

def __init__(self, nodes, base_cmd):
    """Initialize the subvolume CLI wrapper.

    Args:
        nodes: node(s) on which the CLI commands are executed
        base_cmd (str): parent command prefix (e.g. "ceph fs")
    """
    super(SubVolume, self).__init__(nodes)
    self.base_cmd = f"{base_cmd} subvolume"
    # Earmark receives the raw base_cmd (not self.base_cmd): it builds
    # its own "<base_cmd> subvolume earmark" prefix internally.
    self.earmark = Earmark(nodes, base_cmd)

def create(self, volume, subvolume, **kwargs):
"""
Expand Down
45 changes: 45 additions & 0 deletions suites/squid/nfs/tier1-nfs-ganesha-v4-2.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -247,3 +247,48 @@ tests:
config:
nfs_version: 4.2
clients: 1

- test:
name: Delete the old earmark and set the "nfs". Create NFS export out of it
desc: Delete the old earmark and set the "nfs". Create NFS export out of it
module: test_earmark_feature.py
polarion-id: CEPH-83604500
config:
cephfs_volume: cephfs
subvolume_group: nfsgroup
earmark: smb
operation: rename_earmark

- test:
name: creating volume and setting earmark
desc: creating volume and setting earmark
module: test_earmark_feature.py
polarion-id: CEPH-83604497
config:
cephfs_volume: cephfs
subvolume_group: nfsgroup
earmark: nfs
operation: verify_earmark

- test:
name: Override the earmark and try to create nfs export. It should fail
desc: Override the earmark and try to create nfs export. It should fail
module: test_earmark_feature.py
polarion-id: CEPH-83604499
config:
cephfs_volume: cephfs
subvolume_group: nfsgroup
earmark: nfs
operation: override_earmark

- test:
name: Set the earmark as "smb" and try to use the same subvolume for nfs export
desc: Set the earmark as "smb" and try to use the same subvolume for nfs export
module: test_earmark_feature.py
polarion-id: CEPH-83604498
config:
cephfs_volume: cephfs
subvolume_group: nfsgroup
nfs_version: 4.2
earmark: smb
operation: wrong_earmark
126 changes: 126 additions & 0 deletions tests/nfs/test_earmark_feature.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,126 @@
from time import sleep

from cli.ceph.ceph import Ceph
from tests.nfs.nfs_operations import check_nfs_daemons_removed
from tests.nfs.nfs_test_multiple_filesystem_exports import create_nfs_export
from utility.log import Log

log = Log(__name__)


def run(ceph_cluster, **kw):
    """Verify CephFS subvolume earmark scenarios against NFS exports.

    Supported config["operation"] values:
        verify_earmark   - set an earmark and confirm it can be read back
        rename_earmark   - remove the old earmark and set "nfs", then
                           create an NFS export out of the subvolume
        override_earmark - overwrite the earmark with "smb"; the NFS
                           export creation is expected to fail
        wrong_earmark    - earmark "smb" then attempt an NFS export
                           (expected to fail)

    Args:
        ceph_cluster: cluster object under test
        **kw: key/value pairs of configuration information; kw["config"]
            carries the per-test configuration.

    Returns:
        int: 0 on success, 1 on failure.
    """
    config = kw.get("config")
    nfs_nodes = ceph_cluster.get_nodes("nfs")
    clients = ceph_cluster.get_nodes("client")
    nfs_node = nfs_nodes[0]
    operation = config.get("operation")
    fs_name = config.get("cephfs_volume", "ceph_fs")
    nfs_name = "cephfs-nfs"
    nfs_export = "/export"
    nfs_server_name = nfs_node.hostname
    # Honour the group requested by the suite yaml (e.g. "nfsgroup");
    # fall back to the historical default when it is not provided.
    subvolume_group = config.get("subvolume_group", "ganeshagroup")
    subvolume_name = "subvolume"
    earmark = config.get("earmark")
    ceph_fs_obj = Ceph(clients[0]).fs

    ceph_fs_obj.sub_volume_group.create(volume=fs_name, group=subvolume_group)

    ceph_fs_obj.sub_volume.create(
        volume=fs_name, subvolume=subvolume_name, group_name=subvolume_group
    )

    ceph_fs_obj.sub_volume.earmark.set(
        volume=fs_name,
        subvolume_name=subvolume_name,
        earmark=earmark,
        group_name=subvolume_group,
    )

    subvolume_earmark = ceph_fs_obj.sub_volume.earmark.get(
        volume=fs_name, subvolume_name=subvolume_name, group_name=subvolume_group
    )
    if operation == "verify_earmark":
        if earmark not in subvolume_earmark:
            log.error(f'earmark "{earmark}" not found on subvolume {subvolume_name}')
            return 1

        log.info(f'earmark "{earmark}" found on subvolume {subvolume_name}')
        return 0

    if operation == "rename_earmark":
        # "rename" = drop the configured earmark and re-set it as "nfs"
        # so the subsequent NFS export creation is allowed.
        earmark2 = "nfs"
        ceph_fs_obj.sub_volume.earmark.remove(
            volume=fs_name, subvolume_name=subvolume_name, group_name=subvolume_group
        )
        ceph_fs_obj.sub_volume.earmark.set(
            volume=fs_name,
            subvolume_name=subvolume_name,
            group_name=subvolume_group,
            earmark=earmark2,
        )
    try:
        # Setup nfs cluster
        Ceph(clients[0]).nfs.cluster.create(
            name=nfs_name, nfs_server=nfs_server_name, ha=False, vip=None
        )
        sleep(3)

        if operation == "override_earmark":
            # Clobber whatever earmark is present with "smb"; the export
            # below must then be rejected by the nfs module.
            earmark2 = "smb"
            ceph_fs_obj.sub_volume.earmark.set(
                volume=fs_name,
                subvolume_name=subvolume_name,
                group_name=subvolume_group,
                earmark=earmark2,
            )

        # re-verifying the earmark
        subvolume_earmark = ceph_fs_obj.sub_volume.earmark.get(
            volume=fs_name, subvolume_name=subvolume_name, group_name=subvolume_group
        )

        log.info(f"subvolume earmark is {subvolume_earmark}")

        sub_volume_path = ceph_fs_obj.sub_volume.getpath(
            volume=fs_name, subvolume=subvolume_name, group_name=subvolume_group
        )

        create_nfs_export(
            clients[0], fs_name, nfs_name, nfs_export, sub_volume_path, ""
        )
        log.info(
            f"nfs export {nfs_export} has been created for subvolume path {sub_volume_path}"
        )

        Ceph(clients[0]).nfs.export.delete(nfs_name, nfs_export)
        log.info(
            f"nfs export {nfs_export} has been deleted for subvolume path {sub_volume_path}"
        )
        return 0

    except Exception as e:
        # e.args may be empty (IndexError on e.args[0]); str(e) is always safe.
        err_text = str(e)
        if "earmark has already been set by smb" in err_text and operation in [
            "override_earmark",
            "wrong_earmark",
        ]:
            log.info(f"expected failure, earmark has already been set by smb {e}")
            return 0

        log.error(f"unable to create nfs cluster {nfs_name} with error {e}")
        return 1
    finally:
        # Best-effort cleanup: a failure here (e.g. deleting a cluster
        # that was never created) must not mask the test result.
        log.info("Cleaning up in progress")
        try:
            ceph_fs_obj.sub_volume.rm(
                volume=fs_name, subvolume=subvolume_name, group=subvolume_group
            )
            log.info(
                f"Removed the subvolume {subvolume_name} from group {subvolume_group}"
            )
            Ceph(clients[0]).nfs.cluster.delete(nfs_name)
            sleep(30)
            check_nfs_daemons_removed(clients[0])
        except Exception as cleanup_err:
            log.error(f"Cleanup failed: {cleanup_err}")