diff --git a/slurmutils/models/model.py b/slurmutils/models/model.py
index 709075d..53684b4 100644
--- a/slurmutils/models/model.py
+++ b/slurmutils/models/model.py
@@ -90,7 +90,7 @@ def __init__(self, validator=None, /, **kwargs) -> None:
                 raise ModelError(
                     (
                         f"unrecognized argument {k}. "
-                        + f"valid arguments are {[opt.name for opt in validator]}"
+                        + f"valid arguments are {list(validator.keys())}"
                     )
                 )
 
@@ -114,3 +114,7 @@ def dict(self) -> Dict[str, Any]:
     def json(self) -> str:
         """Return model as json object."""
         return json.dumps(self.dict())
+
+    def update(self, other) -> None:
+        """Update current data model content with content of other data model."""
+        self.data.update(other.data)
diff --git a/slurmutils/models/slurm.py b/slurmutils/models/slurm.py
index 62fe312..52f6753 100644
--- a/slurmutils/models/slurm.py
+++ b/slurmutils/models/slurm.py
@@ -310,6 +310,20 @@ def partitions(self):
         """Delete entire partition mapping in the Slurm configuration."""
         self.data["Partitions"] = {}
 
+    def update(self, other: "SlurmConfig") -> None:
+        """Update the fields of this model with the fields of another model."""
+        for config, value in other.dict().items():
+            if config in ["Nodes", "FrontendNodes", "NodeSets", "Partitions"]:
+                for k, v in value.items():
+                    data = self.data[config].get(k, {})
+                    data.update(v)
+                    self.data[config][k] = data
+                continue
+            if config == "DownNodes":
+                self.data["DownNodes"] = self.data["DownNodes"] + value
+                continue
+            self.data.update({config: value})
+
 
 for opt in NodeOptionSet.keys():
     setattr(Node, format_key(opt), property(*generate_descriptors(opt)))
diff --git a/tests/unit/editors/test_slurmconfig.py b/tests/unit/editors/test_slurmconfig.py
index af89008..86abe5c 100644
--- a/tests/unit/editors/test_slurmconfig.py
+++ b/tests/unit/editors/test_slurmconfig.py
@@ -236,5 +236,148 @@ def test_edit(self) -> None:
         for partition in new_partitions:
             config.partitions.update(partition.dict())
 
+    def test_update(self):
+        """Test `update` method of the slurmconfig module."""
+        config_updates = {
+            "KillWait": 10,
+            "PluginDir": "/var/snap/slurm/usr/local/lib:/var/snap/slurm/usr/local/slurm/lib",
+            "ReturnToService": 0,
+            "SchedulerType": "sched/builtin",
+            "SwitchType": "switch/hpe_slingshot",
+            "WaitTime": 30,
+            "Nodes": {
+                "juju-c9fc6f-2": {
+                    "NodeAddr": "10.152.28.98",
+                    "CPUs": "9",
+                    "RealMemory": "9000",
+                    "TmpDisk": "90000",
+                },
+                "juju-c9fc6f-6": {
+                    "NodeAddr": "10.152.28.52",
+                    "CPUs": "9",
+                    "RealMemory": "1000",
+                    "TmpDisk": "10000",
+                },
+                "juju-c9fc6f-7": {
+                    "NodeAddr": "10.152.28.53",
+                    "CPUs": "9",
+                    "RealMemory": "1000",
+                    "TmpDisk": "10000",
+                },
+            },
+            "DownNodes": [
+                {
+                    "DownNodes": ["juju-c9fc6f-6", "juju-c9fc6f-7"],
+                    "State": "DOWN",
+                    "Reason": "New nodes",
+                }
+            ],
+            "Partitions": {
+                "DEFAULT": {
+                    "MaxTime": "10",
+                    "MaxNodes": "5",
+                    "State": "UP",
+                },
+                "new_batch": {
+                    "Nodes": ["juju-c9fc6f-6", "juju-c9fc6f-7"],
+                    "MinNodes": "1",
+                    "MaxTime": "120",
+                    "AllowGroups": "admin",
+                },
+            },
+        }
+
+        config = slurmconfig.loads(example_slurm_conf)
+        updates = slurmconfig.SlurmConfig.from_dict(config_updates)
+        config.update(updates)
+
+        self.assertEqual(config.kill_wait, 10)
+        self.assertEqual(
+            config.plugin_dir,
+            "/var/snap/slurm/usr/local/lib:/var/snap/slurm/usr/local/slurm/lib",
+        )
+        self.assertEqual(config.scheduler_type, "sched/builtin")
+        self.assertEqual(config.switch_type, "switch/hpe_slingshot")
+        self.assertEqual(config.wait_time, 30)
+
+        self.assertDictEqual(
+            config.nodes,
+            {
+                "juju-c9fc6f-2": {
+                    "NodeAddr": "10.152.28.98",
+                    "CPUs": "9",
+                    "RealMemory": "9000",
+                    "TmpDisk": "90000",
+                },
+                "juju-c9fc6f-3": {
+                    "NodeAddr": "10.152.28.49",
+                    "CPUs": "1",
+                    "RealMemory": "1000",
+                    "TmpDisk": "10000",
+                },
+                "juju-c9fc6f-4": {
+                    "NodeAddr": "10.152.28.50",
+                    "CPUs": "1",
+                    "RealMemory": "1000",
+                    "TmpDisk": "10000",
+                },
+                "juju-c9fc6f-5": {
+                    "NodeAddr": "10.152.28.51",
+                    "CPUs": "1",
+                    "RealMemory": "1000",
+                    "TmpDisk": "10000",
+                },
+                "juju-c9fc6f-6": {
+                    "NodeAddr": "10.152.28.52",
+                    "CPUs": "9",
+                    "RealMemory": "1000",
+                    "TmpDisk": "10000",
+                },
+                "juju-c9fc6f-7": {
+                    "NodeAddr": "10.152.28.53",
+                    "CPUs": "9",
+                    "RealMemory": "1000",
+                    "TmpDisk": "10000",
+                },
+            },
+        )
+        self.assertListEqual(
+            config.down_nodes,
+            [
+                {
+                    "DownNodes": ["juju-c9fc6f-5"],
+                    "State": "DOWN",
+                    "Reason": "Maintenance Mode",
+                },
+                {
+                    "DownNodes": ["juju-c9fc6f-6", "juju-c9fc6f-7"],
+                    "State": "DOWN",
+                    "Reason": "New nodes",
+                },
+            ],
+        )
+        self.assertDictEqual(
+            config.partitions,
+            {
+                "DEFAULT": {
+                    "MaxTime": "10",
+                    "MaxNodes": "5",
+                    "State": "UP",
+                },
+                "batch": {
+                    "Nodes": ["juju-c9fc6f-2", "juju-c9fc6f-3", "juju-c9fc6f-4", "juju-c9fc6f-5"],
+                    "MinNodes": "4",
+                    "MaxTime": "120",
+                    "AllowGroups": ["admin"],
+                },
+                "new_batch": {
+                    "Nodes": ["juju-c9fc6f-6", "juju-c9fc6f-7"],
+                    "MinNodes": "1",
+                    "MaxTime": "120",
+                    "AllowGroups": "admin",
+                },
+            },
+        )
+
     def tearDown(self):
         Path("slurm.conf").unlink()