Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

feat: add update method to update configs from other models #16

Merged
merged 4 commits into from
Aug 29, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 5 additions & 1 deletion slurmutils/models/model.py
Original file line number Diff line number Diff line change
Expand Up @@ -90,7 +90,7 @@ def __init__(self, validator=None, /, **kwargs) -> None:
raise ModelError(
(
f"unrecognized argument {k}. "
+ f"valid arguments are {[opt.name for opt in validator]}"
+ f"valid arguments are {list(validator.keys())}"
)
)

Expand All @@ -114,3 +114,7 @@ def dict(self) -> Dict[str, Any]:
def json(self) -> str:
"""Return model as json object."""
return json.dumps(self.dict())

def update(self, other) -> None:
    """Update current data model content with content of other data model."""
    # Keys present in `other` overwrite ours; keys only we hold survive.
    for key, value in other.data.items():
        self.data[key] = value
14 changes: 14 additions & 0 deletions slurmutils/models/slurm.py
Original file line number Diff line number Diff line change
Expand Up @@ -310,6 +310,20 @@ def partitions(self):
"""Delete entire partition mapping in the Slurm configuration."""
self.data["Partitions"] = {}

def update(self, other: "SlurmConfig") -> None:
    """Update the fields of this model with the fields of another model.

    Merge semantics per section:
      * "Nodes", "FrontendNodes", "NodeSets", "Partitions": merged per
        entry — each named entry from `other` is overlaid onto the
        matching entry here (new entries are added).
      * "DownNodes": records from `other` are appended to ours.
      * Every other option: overwritten with the value from `other`.

    Args:
        other: Configuration model whose content is merged into this one.
    """
    for config, value in other.dict().items():
        if config in ("Nodes", "FrontendNodes", "NodeSets", "Partitions"):
            # Merge entry-by-entry instead of replacing the whole map;
            # setdefault tolerates the section being absent from this model.
            section = self.data.setdefault(config, {})
            for name, options in value.items():
                section.setdefault(name, {}).update(options)
            continue
        if config == "DownNodes":
            # Down-node records accumulate rather than overwrite.
            self.data["DownNodes"] = self.data.get("DownNodes", []) + value
            continue
        self.data[config] = value


# Attach an auto-generated property to Node for every option listed in
# NodeOptionSet, so each option can be accessed as an attribute.
# NOTE(review): format_key and generate_descriptors are defined elsewhere
# in this module — presumably a key-name formatter and a getter/setter/
# deleter factory; confirm against their definitions.
for opt in NodeOptionSet.keys():
    setattr(Node, format_key(opt), property(*generate_descriptors(opt)))
Expand Down
143 changes: 143 additions & 0 deletions tests/unit/editors/test_slurmconfig.py
Original file line number Diff line number Diff line change
Expand Up @@ -236,5 +236,148 @@ def test_edit(self) -> None:
for partition in new_partitions:
config.partitions.update(partition.dict())

def test_update(self) -> None:
    """Test `update` method of the slurmconfig module."""
    # Overrides to merge into the example config: scalar options, node
    # definitions (one changed, two new), an extra down-nodes record, and
    # partition definitions (DEFAULT override plus a brand-new partition).
    config_updates = {
        "KillWait": 10,
        "PluginDir": "/var/snap/slurm/usr/local/lib:/var/snap/slurm/usr/local/slurm/lib",
        "ReturnToService": 0,
        "SchedulerType": "sched/builtin",
        "SwitchType": "switch/hpe_slingshot",
        "WaitTime": 30,
        "Nodes": {
            "juju-c9fc6f-2": {
                "NodeAddr": "10.152.28.98",
                "CPUs": "9",
                "RealMemory": "9000",
                "TmpDisk": "90000",
            },
            "juju-c9fc6f-6": {
                "NodeAddr": "10.152.28.52",
                "CPUs": "9",
                "RealMemory": "1000",
                "TmpDisk": "10000",
            },
            "juju-c9fc6f-7": {
                "NodeAddr": "10.152.28.53",
                "CPUs": "9",
                "RealMemory": "1000",
                "TmpDisk": "10000",
            },
        },
        "DownNodes": [
            {
                "DownNodes": ["juju-c9fc6f-6", "juju-c9fc6f-7"],
                "State": "DOWN",
                "Reason": "New nodes",
            }
        ],
        "Partitions": {
            "DEFAULT": {
                "MaxTime": "10",
                "MaxNodes": "5",
                "State": "UP",
            },
            "new_batch": {
                "Nodes": ["juju-c9fc6f-6", "juju-c9fc6f-7"],
                "MinNodes": "1",
                "MaxTime": "120",
                "AllowGroups": "admin",
            },
        },
    }

    # Load the baseline config, build a second model from the overrides,
    # and merge it into the baseline.
    config = slurmconfig.loads(example_slurm_conf)
    updates = slurmconfig.SlurmConfig.from_dict(config_updates)
    config.update(updates)

    # Scalar options are overwritten by the update.
    self.assertEqual(config.kill_wait, 10)
    self.assertEqual(
        config.plugin_dir,
        "/var/snap/slurm/usr/local/lib:/var/snap/slurm/usr/local/slurm/lib",
    )
    self.assertEqual(config.scheduler_type, "sched/builtin")
    self.assertEqual(config.switch_type, "switch/hpe_slingshot")
    self.assertEqual(config.wait_time, 30)

    # Node maps merge per entry: juju-c9fc6f-2 is overwritten,
    # -3/-4/-5 survive from the baseline, and -6/-7 are added.
    self.assertDictEqual(
        config.nodes,
        {
            "juju-c9fc6f-2": {
                "NodeAddr": "10.152.28.98",
                "CPUs": "9",
                "RealMemory": "9000",
                "TmpDisk": "90000",
            },
            "juju-c9fc6f-3": {
                "NodeAddr": "10.152.28.49",
                "CPUs": "1",
                "RealMemory": "1000",
                "TmpDisk": "10000",
            },
            "juju-c9fc6f-4": {
                "NodeAddr": "10.152.28.50",
                "CPUs": "1",
                "RealMemory": "1000",
                "TmpDisk": "10000",
            },
            "juju-c9fc6f-5": {
                "NodeAddr": "10.152.28.51",
                "CPUs": "1",
                "RealMemory": "1000",
                "TmpDisk": "10000",
            },
            "juju-c9fc6f-6": {
                "NodeAddr": "10.152.28.52",
                "CPUs": "9",
                "RealMemory": "1000",
                "TmpDisk": "10000",
            },
            "juju-c9fc6f-7": {
                "NodeAddr": "10.152.28.53",
                "CPUs": "9",
                "RealMemory": "1000",
                "TmpDisk": "10000",
            },
        },
    )
    # Down-node records accumulate: baseline entry first, new entry appended.
    self.assertListEqual(
        config.down_nodes,
        [
            {
                "DownNodes": ["juju-c9fc6f-5"],
                "State": "DOWN",
                "Reason": "Maintenance Mode",
            },
            {
                "DownNodes": ["juju-c9fc6f-6", "juju-c9fc6f-7"],
                "State": "DOWN",
                "Reason": "New nodes",
            },
        ],
    )
    # Partition maps merge per entry: DEFAULT is overwritten, the baseline
    # "batch" partition survives, and "new_batch" is added.
    self.assertDictEqual(
        config.partitions,
        {
            "DEFAULT": {
                "MaxTime": "10",
                "MaxNodes": "5",
                "State": "UP",
            },
            "batch": {
                "Nodes": ["juju-c9fc6f-2", "juju-c9fc6f-3", "juju-c9fc6f-4", "juju-c9fc6f-5"],
                "MinNodes": "4",
                "MaxTime": "120",
                "AllowGroups": ["admin"],
            },
            "new_batch": {
                "Nodes": ["juju-c9fc6f-6", "juju-c9fc6f-7"],
                "MinNodes": "1",
                "MaxTime": "120",
                "AllowGroups": "admin",
            },
        },
    )

def tearDown(self):
    """Remove the scratch slurm.conf file written during the test."""
    scratch = Path("slurm.conf")
    scratch.unlink()