diff --git a/.travis.yml b/.travis.yml index 7ab77dd..1f47b31 100644 --- a/.travis.yml +++ b/.travis.yml @@ -11,4 +11,5 @@ script: - tox -e $TOX_ENV branches: only: - - master \ No newline at end of file + - master + - canary \ No newline at end of file diff --git a/hpc_plugin/external_repositories/ckan.py b/hpc_plugin/external_repositories/ckan.py index d3afa1c..391cc62 100644 --- a/hpc_plugin/external_repositories/ckan.py +++ b/hpc_plugin/external_repositories/ckan.py @@ -1,22 +1,44 @@ +######## +# Copyright (c) 2017-2018 MSO4SC - javier.carnero@atos.net +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" Ckan specific communication to publish data """ + + from external_repository import ExternalRepository class Ckan(ExternalRepository): def __init__(self, publish_item): - super().__init__(publish_item) + super(Ckan, self).__init__(publish_item) self.entrypoint = publish_item['entrypoint'] self.api_key = publish_item['api_key'] self.dataset = publish_item['dataset'] self.file_path = publish_item['file_path'] + self.name = publish_item['name'] + self.description = publish_item["description"] def _build_publish_call(self, logger): # TODO: should this call be detached? operation = "create" # TODO: if resource file exists, operation="update" call = "curl -H'Authorization: " + self.api_key + "' " + \ - "'" + self.entrypoint + "/api/action/resource_" + operation + "' " + \ - "--form upload=@ " + self.file_path + \ - "--form package_id=" + self.package_id + "'" + self.entrypoint + "/api/action/resource_" + \ + operation + "' " + \ + "--form upload=@" + self.file_path + " " + \ + "--form package_id=" + self.dataset + " " + \ + "--form name=" + self.name + " " + \ + "--form description='" + self.description + "'" return call diff --git a/hpc_plugin/external_repositories/external_repository.py b/hpc_plugin/external_repositories/external_repository.py index 878af6f..9937683 100644 --- a/hpc_plugin/external_repositories/external_repository.py +++ b/hpc_plugin/external_repositories/external_repository.py @@ -1,10 +1,27 @@ +######## +# Copyright (c) 2017-2018 MSO4SC - javier.carnero@atos.net +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+""" Holds the external repository common behaviour """ + + from hpc_plugin.ssh import SshClient class ExternalRepository(object): def factory(publish_item): - if publish_item['type'] == "CKAN": # TODO: manage key error + if publish_item['type'] == "CKAN": from ckan import Ckan return Ckan(publish_item) else: @@ -12,7 +29,7 @@ def factory(publish_item): factory = staticmethod(factory) def __init__(self, publish_item): - self.er_type = publish_item['type'] # TODO: manage key error + self.er_type = publish_item['type'] def publish(self, ssh_client, @@ -24,7 +41,7 @@ def publish(self, @type ssh_client: SshClient @param ssh_client: ssh client connected to an HPC login node @rtype string - @return TODO: + @return False if something went wrong """ if not SshClient.check_ssh_client(ssh_client, logger): return False @@ -35,7 +52,8 @@ def publish(self, return ssh_client.execute_shell_command( call, - workdir=workdir) + workdir=workdir, + wait_result=False) # TODO: poner a true def _build_publish_call(self, logger): """ diff --git a/hpc_plugin/ssh.py b/hpc_plugin/ssh.py index daaa9f4..d88d750 100644 --- a/hpc_plugin/ssh.py +++ b/hpc_plugin/ssh.py @@ -1,5 +1,5 @@ ######## -# Copyright (c) 2017 MSO4SC - javier.carnero@atos.net +# Copyright (c) 2017-2018 MSO4SC - javier.carnero@atos.net # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -21,6 +21,7 @@ """ import select import thread +import logging import cStringIO try: @@ -30,6 +31,8 @@ from paramiko import client, RSAKey +logging.getLogger("paramiko").setLevel(logging.WARNING) + class SshClient(object): """Represents a ssh client""" @@ -94,14 +97,15 @@ def execute_shell_command(self, workdir=None, wait_result=False): if not workdir: - return ssh_client.send_command(cmd, - wait_result=wait_result) + return self.send_command(cmd, + wait_result=wait_result) else: + # TODO: set scale variables as well call = "export CURRENT_WORKDIR=" + workdir + " && " call += "cd " + workdir + " && " call += cmd - return ssh_client.send_command(call, - wait_result=wait_result) + return self.send_command(call, + wait_result=wait_result) def send_command(self, command, diff --git a/hpc_plugin/tasks.py b/hpc_plugin/tasks.py index cc1ff8f..2623c46 100644 --- a/hpc_plugin/tasks.py +++ b/hpc_plugin/tasks.py @@ -1,5 +1,5 @@ ######## -# Copyright (c) 2017 MSO4SC - javier.carnero@atos.net +# Copyright (c) 2017-2018 MSO4SC - javier.carnero@atos.net # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -15,6 +15,7 @@ """ Holds the plugin tasks """ import requests +import traceback from cloudify import ctx from cloudify.decorators import operation from cloudify.exceptions import NonRecoverableError @@ -72,9 +73,9 @@ def prepare_hpc(config, wm_type + "' not supported.") client = SshClient(config['credentials']) - _, exit_code = wm._execute_shell_command(client, - 'uname', - wait_result=True) + _, exit_code = client.execute_shell_command( + 'uname', + wait_result=True) if exit_code is not 0: client.close_connection() @@ -113,15 +114,14 @@ def cleanup_hpc(config, skip, simulate, **kwargs): # pylint: disable=W0613 wm_type = config['workload_manager'] wm = WorkloadManager.factory(wm_type) if not wm: - client.close_connection() raise NonRecoverableError( "Workload Manager '" + wm_type + "' not supported.") client = SshClient(config['credentials']) - _, exit_code = wm._execute_shell_command(client, - 'rm -r ' + workdir, - wait_result=True) + _, exit_code = client.execute_shell_command( + 'rm -r ' + workdir, + wait_result=True) client.close_connection() ctx.logger.info('..all clean.') else: @@ -322,19 +322,19 @@ def deploy_job(script, call = "./" + name for dinput in inputs: call += ' ' + dinput - _, exit_code = wm._execute_shell_command(client, - call, - workdir=workdir, - wait_result=True) + _, exit_code = client.execute_shell_command( + call, + workdir=workdir, + wait_result=True) if exit_code is not 0: logger.warning( "failed to deploy job: call '" + call + "', exit code " + str(exit_code)) if not skip_cleanup: - if not wm._execute_shell_command(client, - "rm " + name, - workdir=workdir): + if not client.execute_shell_command( + "rm " + name, + workdir=workdir): logger.warning("failed removing bootstrap script") client.close_connection() @@ -393,6 +393,11 @@ def cleanup_job(job_options, skip, **kwargs): # pylint: disable=W0613 try: simulate = ctx.instance.runtime_properties['simulate'] + except KeyError: + # The job wasn't configured properly, so no cleanup needed + ctx.logger.warning('Job was not cleaned up as it was not configured.') + + try: name = kwargs['name'] if not simulate: is_singularity = 'hpc.nodes.singularity_job' in ctx.node.\ type_hierarchy @@ -402,7 +407,6 @@ def cleanup_job(job_options, skip, **kwargs): # pylint: disable=W0613 client = SshClient(ctx.instance.runtime_properties['credentials']) - # TODO: manage errors wm = WorkloadManager.factory(wm_type) if not wm: client.close_connection() @@ -428,9 +432,10 @@ def cleanup_job(job_options, skip, **kwargs): # pylint: disable=W0613 else: ctx.logger.error('Job ' + name + ' (' + ctx.instance.id + ') not cleaned.') - except KeyError: - # The job wasn't configured properly, so no cleanup needed - ctx.logger.warning('Job was not cleaned up as it was not configured.') + except Exception as exp: + print(traceback.format_exc()) + ctx.logger.error( + 'Something happened when trying to clean up: ' + exp.message) @operation @@ -438,7 +443,11 @@ def stop_job(job_options, **kwargs): # pylint: disable=W0613 """ Stops a job in the HPC """ try: simulate = ctx.instance.runtime_properties['simulate'] + except KeyError: + # The job wasn't configured properly, no need to be stopped + ctx.logger.warning('Job was not stopped as it was not configured.') + try: name = kwargs['name'] is_singularity = 'hpc.nodes.singularity_job' in ctx.node.\ type_hierarchy @@ -448,7 +457,6 @@ def stop_job(job_options, **kwargs): # pylint: disable=W0613 wm_type = ctx.instance.runtime_properties['workload_manager'] client = SshClient(ctx.instance.runtime_properties['credentials']) - # 
TODO: manage errors wm = WorkloadManager.factory(wm_type) if not wm: client.close_connection() @@ -476,9 +484,10 @@ def stop_job(job_options, **kwargs): # pylint: disable=W0613 ') not stopped.') raise NonRecoverableError('Job ' + name + ' (' + ctx.instance.id + ') not stopped.') - except KeyError: - # The job wasn't configured properly, no need to be stopped - ctx.logger.warning('Job was not stopped as it was not configured.') + except Exception as exp: + print(traceback.format_exc()) + ctx.logger.error( + 'Something happened when trying to stop: ' + exp.message) @operation @@ -486,38 +495,45 @@ def publish(publish_options, **kwargs): """ Publish the job outputs """ try: simulate = ctx.instance.runtime_properties['simulate'] + except KeyError as exp: + # The job wasn't configured properly, no need to publish + ctx.logger.warning( + 'Job outputs were not published as' + + ' the job was not configured properly.') + return + try: name = kwargs['name'] - is_singularity = 'hpc.nodes.singularity_job' in ctx.node.\ - type_hierarchy - + published = True if not simulate: workdir = ctx.instance.runtime_properties['workdir'] client = SshClient(ctx.instance.runtime_properties['credentials']) for publish_item in publish_options: + if not published: + break er = ExternalRepository.factory(publish_item) if not er: client.close_connection() raise NonRecoverableError( "External repository '" + - publish_item['type'] + # TODO: manage key error + publish_item['type'] + "' not supported.") + published = er.publish(client, ctx.logger, workdir) client.close_connection() else: ctx.logger.warning('Instance ' + ctx.instance.id + ' simulated') - is_stopped = True - if is_stopped: + if published: ctx.logger.info( - 'Job ' + name + ' (' + ctx.instance.id + ') stopped.') + 'Job ' + name + ' (' + ctx.instance.id + ') published.') else: ctx.logger.error('Job ' + name + ' (' + ctx.instance.id + - ') not stopped.') + ') not published.') raise NonRecoverableError('Job ' + name + ' (' + ctx.instance.id + - ') not stopped.') - except KeyError: - # The job wasn't configured properly, no need to be stopped - ctx.logger.warning( - 'Job outputs where not published as it was not configured.') + ') not published.') + except Exception as exp: + print(traceback.format_exc()) + ctx.logger.error( + 'Cannot publish: ' + exp.message) diff --git a/hpc_plugin/tests/blueprint/blueprint-inputs.yaml b/hpc_plugin/tests/blueprint/blueprint-inputs.yaml index e132b34..ee7f105 100644 --- a/hpc_plugin/tests/blueprint/blueprint-inputs.yaml +++ b/hpc_plugin/tests/blueprint/blueprint-inputs.yaml @@ -22,18 +22,8 @@ mso4sc_hpc_primary: country_tz: "Europe/Madrid" workload_manager: "SLURM" -# Second HPC configuration -mso4sc_hpc_secondary: - credentials: - host: "HOST" - user: "USER" - password: "PASSWD" - tunnel: - host: "TUNNEL_HOST" - user: "TUNNEL_USER" - private_key: | - -----BEGIN RSA PRIVATE KEY----- - .... 
- -----END RSA PRIVATE KEY----- - country_tz: "Europe/Paris" - workload_manager: "SLURM" \ No newline at end of file +mso4sc_datacatalogue_entrypoint: "http://193.144.35.207" + +mso4sc_datacatalogue_key: "****" + +mso4sc_outdataset_outputs_at: "dataset_id" \ No newline at end of file diff --git a/hpc_plugin/tests/blueprint/blueprint_four.yaml b/hpc_plugin/tests/blueprint/blueprint_four.yaml index 29f1cf5..9d67e2a 100644 --- a/hpc_plugin/tests/blueprint/blueprint_four.yaml +++ b/hpc_plugin/tests/blueprint/blueprint_four.yaml @@ -44,11 +44,18 @@ inputs: mso4sc_hpc_primary: description: Configuration for the primary HPC to be used default: {} - - # Second HPC configuration - mso4sc_hpc_secondary: - description: Configuration for the secondary HPC to be used - default: {} + + mso4sc_datacatalogue_entrypoint: + description: entrypoint of the data catalogue + default: "http://193.144.35.207" + + mso4sc_datacatalogue_key: + description: API Key to publish the outputs + default: "" + + mso4sc_outdataset_outputs_at: + description: ID of the CKAN output dataset + default: "" node_templates: first_hpc: diff --git a/hpc_plugin/tests/blueprint/blueprint_four_scale.yaml b/hpc_plugin/tests/blueprint/blueprint_four_scale.yaml index 17b1c40..57170ab 100644 --- a/hpc_plugin/tests/blueprint/blueprint_four_scale.yaml +++ b/hpc_plugin/tests/blueprint/blueprint_four_scale.yaml @@ -44,11 +44,18 @@ inputs: mso4sc_hpc_primary: description: Configuration for the primary HPC to be used default: {} - - # Second HPC configuration - mso4sc_hpc_secondary: - description: Configuration for the secondary HPC to be used - default: {} + + mso4sc_datacatalogue_entrypoint: + description: entrypoint of the data catalogue + default: "http://193.144.35.207" + + mso4sc_datacatalogue_key: + description: API Key to publish the outputs + default: "" + + mso4sc_outdataset_outputs_at: + description: ID of the CKAN output dataset + default: "" node_templates: first_hpc: diff --git a/hpc_plugin/tests/blueprint/blueprint_sbatch.yaml b/hpc_plugin/tests/blueprint/blueprint_sbatch.yaml index de8583b..42fa964 100644 --- a/hpc_plugin/tests/blueprint/blueprint_sbatch.yaml +++ b/hpc_plugin/tests/blueprint/blueprint_sbatch.yaml @@ -44,11 +44,18 @@ inputs: mso4sc_hpc_primary: description: Configuration for the primary HPC to be used default: {} - - # Second HPC configuration - mso4sc_hpc_secondary: - description: Configuration for the secondary HPC to be used - default: {} + + mso4sc_datacatalogue_entrypoint: + description: entrypoint of the data catalogue + default: "http://193.144.35.207" + + mso4sc_datacatalogue_key: + description: API Key to publish the outputs + default: "" + + mso4sc_outdataset_outputs_at: + description: ID of the CKAN output dataset + default: "" node_templates: first_hpc: diff --git a/hpc_plugin/tests/blueprint/blueprint_sbatch_output.yaml b/hpc_plugin/tests/blueprint/blueprint_sbatch_output.yaml index 682cb79..b9cb317 100644 --- a/hpc_plugin/tests/blueprint/blueprint_sbatch_output.yaml +++ b/hpc_plugin/tests/blueprint/blueprint_sbatch_output.yaml @@ -44,20 +44,16 @@ inputs: description: Configuration for the primary HPC to be used default: {} - mso4sc_datacatalogue: + mso4sc_datacatalogue_entrypoint: description: entrypoint of the data catalogue - default: "" + default: "http://193.144.35.207" - mso4sc_publish_key: + mso4sc_datacatalogue_key: description: API Key to publish the outputs default: "" - publish_dataset_id: - description: ID of the CKAN dataset - default: "" - - publish_outputs_path: - description: Local path 
to the outputs to be uploaded + mso4sc_outdataset_outputs_at: + description: ID of the CKAN output dataset default: "" node_templates: @@ -85,10 +81,12 @@ node_templates: - { get_input: partition } publish: - type: "CKAN" - entrypoint: { get_input: mso4sc_datacatalogue} - api_key: { get_input: mso4sc_publish_key } - dataset: { get_input: publish_dataset_id } - file_path: { get_input: publish_outputs_path } + entrypoint: { get_input: mso4sc_datacatalogue_entrypoint} + api_key: { get_input: mso4sc_datacatalogue_key } + dataset: { get_input: mso4sc_outdataset_outputs_at } + file_path: "$CURRENT_WORKDIR/test_single.test" + name: "test_single" + description: "output test" skip_cleanup: True relationships: - type: job_contained_in_hpc diff --git a/hpc_plugin/tests/blueprint/blueprint_sbatch_scale.yaml b/hpc_plugin/tests/blueprint/blueprint_sbatch_scale.yaml index f5d5ed7..6576c69 100644 --- a/hpc_plugin/tests/blueprint/blueprint_sbatch_scale.yaml +++ b/hpc_plugin/tests/blueprint/blueprint_sbatch_scale.yaml @@ -44,11 +44,18 @@ inputs: mso4sc_hpc_primary: description: Configuration for the primary HPC to be used default: {} - - # Second HPC configuration - mso4sc_hpc_secondary: - description: Configuration for the secondary HPC to be used - default: {} + + mso4sc_datacatalogue_entrypoint: + description: entrypoint of the data catalogue + default: "http://193.144.35.207" + + mso4sc_datacatalogue_key: + description: API Key to publish the outputs + default: "" + + mso4sc_outdataset_outputs_at: + description: ID of the CKAN output dataset + default: "" node_templates: first_hpc: diff --git a/hpc_plugin/tests/blueprint/blueprint_singularity.yaml b/hpc_plugin/tests/blueprint/blueprint_singularity.yaml index 2f2340a..43416fd 100644 --- a/hpc_plugin/tests/blueprint/blueprint_singularity.yaml +++ b/hpc_plugin/tests/blueprint/blueprint_singularity.yaml @@ -44,11 +44,18 @@ inputs: mso4sc_hpc_primary: description: Configuration for the primary HPC to be used default: {} - - # Second HPC configuration - mso4sc_hpc_secondary: - description: Configuration for the secondary HPC to be used - default: {} + + mso4sc_datacatalogue_entrypoint: + description: entrypoint of the data catalogue + default: "http://193.144.35.207" + + mso4sc_datacatalogue_key: + description: API Key to publish the outputs + default: "" + + mso4sc_outdataset_outputs_at: + description: ID of the CKAN output dataset + default: "" node_templates: first_hpc: diff --git a/hpc_plugin/tests/blueprint/blueprint_singularity_scale.yaml b/hpc_plugin/tests/blueprint/blueprint_singularity_scale.yaml index 4e7a1b1..2f356d9 100644 --- a/hpc_plugin/tests/blueprint/blueprint_singularity_scale.yaml +++ b/hpc_plugin/tests/blueprint/blueprint_singularity_scale.yaml @@ -44,11 +44,18 @@ inputs: mso4sc_hpc_primary: description: Configuration for the primary HPC to be used default: {} - - # Second HPC configuration - mso4sc_hpc_secondary: - description: Configuration for the secondary HPC to be used - default: {} + + mso4sc_datacatalogue_entrypoint: + description: entrypoint of the data catalogue + default: "http://193.144.35.207" + + mso4sc_datacatalogue_key: + description: API Key to publish the outputs + default: "" + + mso4sc_outdataset_outputs_at: + description: ID of the CKAN output dataset + default: "" node_templates: first_hpc: diff --git a/hpc_plugin/tests/blueprint/blueprint_srun.yaml b/hpc_plugin/tests/blueprint/blueprint_srun.yaml index a2fbd1f..60e2f92 100644 --- a/hpc_plugin/tests/blueprint/blueprint_srun.yaml +++ 
b/hpc_plugin/tests/blueprint/blueprint_srun.yaml @@ -44,11 +44,18 @@ inputs: mso4sc_hpc_primary: description: Configuration for the primary HPC to be used default: {} - - # Second HPC configuration - mso4sc_hpc_secondary: - description: Configuration for the secondary HPC to be used - default: {} + + mso4sc_datacatalogue_entrypoint: + description: entrypoint of the data catalogue + default: "http://193.144.35.207" + + mso4sc_datacatalogue_key: + description: API Key to publish the outputs + default: "" + + mso4sc_outdataset_outputs_at: + description: ID of the CKAN output dataset + default: "" node_templates: first_hpc: diff --git a/hpc_plugin/tests/blueprint/scripts/bootstrap_sbatch_example.sh b/hpc_plugin/tests/blueprint/scripts/bootstrap_sbatch_example.sh index c94317f..b7fc1c1 100644 --- a/hpc_plugin/tests/blueprint/scripts/bootstrap_sbatch_example.sh +++ b/hpc_plugin/tests/blueprint/scripts/bootstrap_sbatch_example.sh @@ -9,10 +9,9 @@ cat > $FILE <<- EOM #SBATCH -N 1 #SBATCH -n 1 #SBATCH --ntasks-per-node=1 -#SBATCH -t 00:15:00 +#SBATCH -t 00:01:00 # DYNAMIC VARIABLES -sleep 900 touch test_$1.test EOM diff --git a/hpc_plugin/tests/workflow_tests.py b/hpc_plugin/tests/workflow_tests.py index ad6ebf2..8ae9dbb 100644 --- a/hpc_plugin/tests/workflow_tests.py +++ b/hpc_plugin/tests/workflow_tests.py @@ -104,6 +104,36 @@ def test_sbatch(self, cfy_local): True) else: logging.warning('[WARNING] Login could not be tested') + + @workflow_test(os.path.join('blueprint', 'blueprint_sbatch_output.yaml'), + resources_to_copy=[(os.path.join('blueprint', 'hpc_plugin', + 'test_plugin.yaml'), + 'hpc_plugin'), + (os.path.join('blueprint', 'scripts', + 'bootstrap_' + + 'sbatch_example.sh'), + 'scripts'), + (os.path.join('blueprint', 'scripts', + 'revert_' + + 'sbatch_example.sh'), + 'scripts')], + inputs='set_inputs') + def test_sbatch_output(self, cfy_local): + """ Install & Run workflows. """ + cfy_local.execute('install', task_retries=0) + cfy_local.execute('run_jobs', task_retries=0) + cfy_local.execute('uninstall', task_retries=0) + + # extract single node instance + instance = cfy_local.storage.get_node_instances()[0] + + # due to a cfy bug sometimes login keyword is not ready in the tests + if 'login' in instance.runtime_properties: + # assert runtime properties is properly set in node instance + self.assertEqual(instance.runtime_properties['login'], + True) + else: + logging.warning('[WARNING] Login could not be tested') @workflow_test(os.path.join('blueprint', 'blueprint_sbatch_scale.yaml'), resources_to_copy=[(os.path.join('blueprint', 'hpc_plugin', diff --git a/hpc_plugin/workflows.py b/hpc_plugin/workflows.py index 2ae25f0..597faba 100644 --- a/hpc_plugin/workflows.py +++ b/hpc_plugin/workflows.py @@ -123,6 +123,9 @@ def set_status(self, status): self.completed = not self.parent_node.is_job or \ self._status == 'COMPLETED' + + if self.completed: + self.publish() # TODO: do it in another thread? 
if not self.parent_node.is_job: self.failed = False @@ -217,16 +220,16 @@ def queue_all_instances(self): self.status = 'QUEUED' return tasks_list - def publish(self): - """ Send all instances to the HPC queue if it represents a Job """ - if not self.is_job: - return [] + # def publish(self): + # """ Send all instances to the HPC queue if it represents a Job """ + # if not self.is_job: + # return [] - tasks_list = [] - for job_instance in self.instances: - tasks_list.append(job_instance.publish()) + # tasks_list = [] + # for job_instance in self.instances: + # tasks_list.append(job_instance.publish()) - return tasks_list + # return tasks_list def is_ready(self): """ True if it has no more dependencies to satisfy """ diff --git a/hpc_plugin/workload_managers/slurm.py b/hpc_plugin/workload_managers/slurm.py index deae7de..cd90f1a 100644 --- a/hpc_plugin/workload_managers/slurm.py +++ b/hpc_plugin/workload_managers/slurm.py @@ -202,9 +202,9 @@ def get_states(self, ssh_client, names, logger): # TODO(emepetres) set start time of consulting # (sacct only check current day) call = "sacct -n -o JobName,State -X -P --name=" + ','.join(names) - output, exit_code = self._execute_shell_command(ssh_client, - call, - wait_result=True) + output, exit_code = ssh_client.execute_shell_command( + call, + wait_result=True) states = {} if exit_code == 0: