From b5670969ef37b28b230d7321dd0d4c2bcbacb83f Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Philipp=20R=C3=BCssmann?=
Date: Thu, 31 Oct 2019 15:46:36 +0100
Subject: [PATCH 1/9] Add Codacy settings, deactivate tests with Rabbit MQ for
python 3.7 tests
---
.codacy.yml | 3 +
.travis.yml | 1 +
aiida_kkr/tests/run_all.sh | 31 +++++---
aiida_kkr/tests/test_common_workfunctions.py | 56 --------------
.../test_common_workfunctions_with_rmq.py | 74 +++++++++++++++++++
5 files changed, 99 insertions(+), 66 deletions(-)
create mode 100644 .codacy.yml
create mode 100644 aiida_kkr/tests/test_common_workfunctions_with_rmq.py
diff --git a/.codacy.yml b/.codacy.yml
new file mode 100644
index 00000000..721a6252
--- /dev/null
+++ b/.codacy.yml
@@ -0,0 +1,3 @@
+
+exclude_paths:
+ - 'aiida_kkr/tests/**'
diff --git a/.travis.yml b/.travis.yml
index 1f743331..ffd2b802 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -12,6 +12,7 @@ matrix:
include:
- python: 3.7
dist: xenial
+ env: NO_RMQ='t'
notifications:
slack:
on_success: never
diff --git a/aiida_kkr/tests/run_all.sh b/aiida_kkr/tests/run_all.sh
index 7f2025ac..2b468eee 100755
--- a/aiida_kkr/tests/run_all.sh
+++ b/aiida_kkr/tests/run_all.sh
@@ -51,6 +51,11 @@ else
else
echo "skip workflows tests using KKRimp (set 'RUN_KKRIMP' env to activate this)"
fi
+ if [[ ! -z "$NO_RMQ" ]]; then
+ echo "do not run workflows and workfunctions that need rabbitMQ (unset 'NO_RMQ' env to prevent this)"
+ else
+ echo "run workflows and workfunctions also with rabbitMQ (set 'NO_RMQ' env to deactivate this)"
+ fi
fi
echo "============="
echo
@@ -63,18 +68,24 @@ if [[ ! -z "$RUN_ALL" ]]; then
pytest --cov-report=term-missing --cov=aiida_kkr --cov-append --ignore=jukkr --ignore=workflows --mpl -p no:warnings $addopt # then run non-workflow tests
else
# tests without running actual calculations
- if [[ -z "$SKIP_NOWORK" ]]; then
+ if [[ -z "$SKIP_NOWORK" ]] && [[ -z "$NO_RMQ" ]]; then
echo "run non-workflow tests"
pytest --cov-report=term-missing --cov=aiida_kkr --ignore=workflows --ignore=jukkr --mpl -p no:warnings $addopt
else
- echo "skipping tests that are not workflows"
+ # skip things that need rabbitMQ
+ if [[ -z "$SKIP_NOWORK" ]] && [[ ! -z "$NO_RMQ" ]]; then
+ echo "run non-workflow tests"
+ pytest --cov-report=term-missing --cov=aiida_kkr --ignore=workflows --ignore=jukkr --ignore=calculations --ignore=test_common_workfunctions_with_rmq.py --ignore=test_plot_kkr.py --mpl -p no:warnings $addopt
+ else
+ echo "skipping tests that are not workflows"
+ fi
fi
# test running full workflows, need compiled codes and execute them
# tests using only voronoi
- if [[ ! -z "$RUN_VORONOI" ]]; then
+ if [[ ! -z "$RUN_VORONOI" ]] && [[ ! -z "$NO_RMQ" ]]; then
echo "run vorostart workflow test"
pytest --cov-report=term-missing --cov-append --cov=aiida_kkr --ignore=jukkr -k Test_vorostart_workflow $addopt
else
@@ -83,25 +94,25 @@ else
# tests using kkrhost (and voronoi)
- if [[ ! -z "$RUN_KKRHOST" ]]; then
+ if [[ ! -z "$RUN_KKRHOST" ]] && [[ ! -z "$NO_RMQ" ]]; then
echo "run kkr_dos workflow test"
pytest --cov-report=term-missing --cov-append --cov=aiida_kkr --ignore=jukkr -k Test_dos_workflow $addopt
else
echo "skipping kkr_dos workflow test"
fi
- if [[ ! -z "$RUN_KKRHOST" ]]; then
+ if [[ ! -z "$RUN_KKRHOST" ]] && [[ ! -z "$NO_RMQ" ]]; then
echo "run kkr_gf_writeout workflow test"
pytest --cov-report=term-missing --cov-append --cov=aiida_kkr --ignore=jukkr -k Test_gf_writeout_workflow $addopt
else
echo "skipping kkr_gf_writeout workflow test"
fi
- if [[ ! -z "$RUN_VORONOI" ]] && [[ ! -z "$RUN_KKRHOST" ]]; then
+ if [[ ! -z "$RUN_VORONOI" ]] && [[ ! -z "$RUN_KKRHOST" ]] && [[ ! -z "$NO_RMQ" ]]; then
echo "run kkr_scf workflow test"
pytest --cov-report=term-missing --cov-append --cov=aiida_kkr --ignore=jukkr -k Test_scf_workflow $addopt
else
echo "skipping kkr_scf workflow test"
fi
- if [[ ! -z "$RUN_VORONOI" ]] && [[ ! -z "$RUN_KKRHOST" ]]; then
+ if [[ ! -z "$RUN_VORONOI" ]] && [[ ! -z "$RUN_KKRHOST" ]] && [[ ! -z "$NO_RMQ" ]]; then
echo "run kkr_eos workflow test"
pytest --cov-report=term-missing --cov-append --cov=aiida_kkr --ignore=jukkr -k Test_eos_workflow $addopt
else
@@ -110,19 +121,19 @@ else
# tests using kkrimp (and kkrhost/voronoi)
- if [[ ! -z "$RUN_KKRIMP" ]]; then
+ if [[ ! -z "$RUN_KKRIMP" ]] && [[ ! -z "$NO_RMQ" ]]; then
echo "run kkrimp_scf workflow test"
pytest --cov-report=term-missing --cov-append --cov=aiida_kkr --ignore=jukkr -k Test_kkrimp_scf_workflow $addopt
else
echo "skipping kkrimp_scf workflow test"
fi
- if [[ ! -z "$RUN_KKRIMP" ]] && [[ ! -z "$RUN_KKRHOST" ]] && [[ ! -z "$RUN_VORONOI" ]]; then
+ if [[ ! -z "$RUN_KKRIMP" ]] && [[ ! -z "$RUN_KKRHOST" ]] && [[ ! -z "$RUN_VORONOI" ]] && [[ ! -z "$NO_RMQ" ]]; then
echo "run kkrimp_full workflow test"
pytest --cov-report=term-missing --cov-append --cov=aiida_kkr --ignore=jukkr -k Test_kkrimp_full_workflow $addopt
else
echo "skipping kkrimp_full workflow test"
fi
- if [[ ! -z "$RUN_KKRIMP" ]] && [[ ! -z "$RUN_KKRHOST" ]]; then
+ if [[ ! -z "$RUN_KKRIMP" ]] && [[ ! -z "$RUN_KKRHOST" ]] && [[ ! -z "$NO_RMQ" ]]; then
echo "run kkrimp_dos workflow test"
pytest --cov-report=term-missing --cov-append --cov=aiida_kkr --ignore=jukkr -k Test_kkrimp_dos_workflow $addopt
else
diff --git a/aiida_kkr/tests/test_common_workfunctions.py b/aiida_kkr/tests/test_common_workfunctions.py
index 07c2cfb3..ad7d24c3 100644
--- a/aiida_kkr/tests/test_common_workfunctions.py
+++ b/aiida_kkr/tests/test_common_workfunctions.py
@@ -141,62 +141,6 @@ def test_check_2Dinput_consistency_5(self):
assert list(input_check[1]).sort() == list("3D info given in parameters but structure is 2D\nstructure is 2D? {}\ninput has 2D info? {}\nset keys are: {}".format(False, True, ['ZPERIODL', '', '', 'INTERFACE', '', 'ZPERIODR', ''])).sort()
- def test_update_params_wf(self):
- from aiida_kkr.tools.common_workfunctions import update_params_wf
- from masci_tools.io.kkr_params import kkrparams
- from aiida.plugins import DataFactory
- Dict = DataFactory('dict')
-
- k = kkrparams(LMAX=2)
- node1 = Dict(dict=k.values)
- node2 = Dict(dict={'nodename': 'my_changed_name', 'nodedesc': 'My description text', 'EMIN': -1, 'RMAX': 10.})
-
- unode = update_params_wf(node1, node1)
- assert unode.get_dict() == node1.get_dict()
-
- unode = update_params_wf(node1, node2)
-
- d0 = node1.get_dict()
- for i in list(d0.keys()):
- if d0[i] is None:
- d0.pop(i)
-
- d1 = unode.get_dict()
- for i in list(d1.keys()):
- if d1[i] is None:
- d1.pop(i)
-
- l_identical, l_diff = [], []
- for i in list(d0.keys()):
- if i in list(d1.keys()):
- l_identical.append([i, d0[i], d1[i]])
- else:
- l_diff.append([0, i, d0[i]])
- for i in list(d1.keys()):
- if i not in list(d0.keys()):
- l_diff.append([1, i, d1[i]])
-
- assert l_identical == [[u'LMAX', 2, 2]]
- assert l_diff.sort() == [[1, u'RMAX', 10.0], [1, u'EMIN', -1.0]].sort()
- return node1, node2, unode
-
-
- def test_neworder_potential_wf(self):
- from numpy import loadtxt
- from aiida.orm import load_node
- from aiida.plugins import DataFactory
- from aiida_kkr.tools.common_workfunctions import neworder_potential_wf
- from aiida.tools.importexport import import_data
- Dict = DataFactory('dict')
- import_data('files/db_dump_kkrflex_create.tar.gz')
- GF_host_calc = load_node('baabef05-f418-4475-bba5-ef0ee3fd5ca6').outputs
- neworder_pot1 = [int(i) for i in loadtxt(GF_host_calc.retrieved.open('scoef'), skiprows=1)[:,3]-1]
- settings_dict = {'pot1': 'out_potential', 'out_pot': 'potential_imp', 'neworder': neworder_pot1}
- settings = Dict(dict=settings_dict)
- startpot_imp_sfd = neworder_potential_wf(settings_node=settings, parent_calc_folder=GF_host_calc.remote_folder)
- assert startpot_imp_sfd.get_object_content(startpot_imp_sfd.filename)[::1000] == u'C12807143D556463084.6+55 7D117 9D-87 0+25\n20.70351.75\n0521259.2+491.0-462. 02621D74112D03547T00 4D02116D502 6D39\n96.20261.50941.4944.7+30 98-29 .5-3625D07193.58104D0773D27252285417D341 9.506544D548447094.9+38 91063 54-08 6D28277.60909.98111'
-
-
def test_vca_check(self):
from aiida_kkr.tools.common_workfunctions import vca_check
pass
diff --git a/aiida_kkr/tests/test_common_workfunctions_with_rmq.py b/aiida_kkr/tests/test_common_workfunctions_with_rmq.py
new file mode 100644
index 00000000..128528ad
--- /dev/null
+++ b/aiida_kkr/tests/test_common_workfunctions_with_rmq.py
@@ -0,0 +1,74 @@
+# -*- coding: utf-8 -*-
+"""
+@author: ruess
+"""
+from __future__ import print_function
+from __future__ import absolute_import
+from builtins import object
+import pytest
+from six.moves import range
+from numpy import sort
+
+@pytest.mark.usefixtures("aiida_env")
+class Test_common_workfunctions_rmq(object):
+ """
+ Tests for the common workfunctions from tools.common_workfunctions,
+ i.e. functions commonly used in this plugin that depend on aiida stuff to work
+ these tests use rabbitMQ
+ """
+
+ def test_update_params_wf(self):
+ from aiida_kkr.tools.common_workfunctions import update_params_wf
+ from masci_tools.io.kkr_params import kkrparams
+ from aiida.plugins import DataFactory
+ Dict = DataFactory('dict')
+
+ k = kkrparams(LMAX=2)
+ node1 = Dict(dict=k.values)
+ node2 = Dict(dict={'nodename': 'my_changed_name', 'nodedesc': 'My description text', 'EMIN': -1, 'RMAX': 10.})
+
+ unode = update_params_wf(node1, node1)
+ assert unode.get_dict() == node1.get_dict()
+
+ unode = update_params_wf(node1, node2)
+
+ d0 = node1.get_dict()
+ for i in list(d0.keys()):
+ if d0[i] is None:
+ d0.pop(i)
+
+ d1 = unode.get_dict()
+ for i in list(d1.keys()):
+ if d1[i] is None:
+ d1.pop(i)
+
+ l_identical, l_diff = [], []
+ for i in list(d0.keys()):
+ if i in list(d1.keys()):
+ l_identical.append([i, d0[i], d1[i]])
+ else:
+ l_diff.append([0, i, d0[i]])
+ for i in list(d1.keys()):
+ if i not in list(d0.keys()):
+ l_diff.append([1, i, d1[i]])
+
+ assert l_identical == [[u'LMAX', 2, 2]]
+ assert l_diff.sort() == [[1, u'RMAX', 10.0], [1, u'EMIN', -1.0]].sort()
+ return node1, node2, unode
+
+
+ def test_neworder_potential_wf(self):
+ from numpy import loadtxt
+ from aiida.orm import load_node
+ from aiida.plugins import DataFactory
+ from aiida_kkr.tools.common_workfunctions import neworder_potential_wf
+ from aiida.tools.importexport import import_data
+ Dict = DataFactory('dict')
+ import_data('files/db_dump_kkrflex_create.tar.gz')
+ GF_host_calc = load_node('baabef05-f418-4475-bba5-ef0ee3fd5ca6').outputs
+ neworder_pot1 = [int(i) for i in loadtxt(GF_host_calc.retrieved.open('scoef'), skiprows=1)[:,3]-1]
+ settings_dict = {'pot1': 'out_potential', 'out_pot': 'potential_imp', 'neworder': neworder_pot1}
+ settings = Dict(dict=settings_dict)
+ startpot_imp_sfd = neworder_potential_wf(settings_node=settings, parent_calc_folder=GF_host_calc.remote_folder)
+ assert startpot_imp_sfd.get_object_content(startpot_imp_sfd.filename)[::1000] == u'C12807143D556463084.6+55 7D117 9D-87 0+25\n20.70351.75\n0521259.2+491.0-462. 02621D74112D03547T00 4D02116D502 6D39\n96.20261.50941.4944.7+30 98-29 .5-3625D07193.58104D0773D27252285417D341 9.506544D548447094.9+38 91063 54-08 6D28277.60909.98111'
+
From 7dc389d6b868a27249020db842e6e0b0088f18c6 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Philipp=20R=C3=BCssmann?=
Date: Thu, 31 Oct 2019 15:54:14 +0100
Subject: [PATCH 2/9] Fix NO_RMQ mode for workflow tests
---
aiida_kkr/tests/run_all.sh | 16 ++++++++--------
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/aiida_kkr/tests/run_all.sh b/aiida_kkr/tests/run_all.sh
index 2b468eee..46b65665 100755
--- a/aiida_kkr/tests/run_all.sh
+++ b/aiida_kkr/tests/run_all.sh
@@ -85,7 +85,7 @@ else
# tests using only voronoi
- if [[ ! -z "$RUN_VORONOI" ]] && [[ ! -z "$NO_RMQ" ]]; then
+ if [[ ! -z "$RUN_VORONOI" ]] && [[ -z "$NO_RMQ" ]]; then
echo "run vorostart workflow test"
pytest --cov-report=term-missing --cov-append --cov=aiida_kkr --ignore=jukkr -k Test_vorostart_workflow $addopt
else
@@ -94,25 +94,25 @@ else
# tests using kkrhost (and voronoi)
- if [[ ! -z "$RUN_KKRHOST" ]] && [[ ! -z "$NO_RMQ" ]]; then
+ if [[ ! -z "$RUN_KKRHOST" ]] && [[ -z "$NO_RMQ" ]]; then
echo "run kkr_dos workflow test"
pytest --cov-report=term-missing --cov-append --cov=aiida_kkr --ignore=jukkr -k Test_dos_workflow $addopt
else
echo "skipping kkr_dos workflow test"
fi
- if [[ ! -z "$RUN_KKRHOST" ]] && [[ ! -z "$NO_RMQ" ]]; then
+ if [[ ! -z "$RUN_KKRHOST" ]] && [[ -z "$NO_RMQ" ]]; then
echo "run kkr_gf_writeout workflow test"
pytest --cov-report=term-missing --cov-append --cov=aiida_kkr --ignore=jukkr -k Test_gf_writeout_workflow $addopt
else
echo "skipping kkr_gf_writeout workflow test"
fi
- if [[ ! -z "$RUN_VORONOI" ]] && [[ ! -z "$RUN_KKRHOST" ]] && [[ ! -z "$NO_RMQ" ]]; then
+ if [[ ! -z "$RUN_VORONOI" ]] && [[ ! -z "$RUN_KKRHOST" ]] && [[ -z "$NO_RMQ" ]]; then
echo "run kkr_scf workflow test"
pytest --cov-report=term-missing --cov-append --cov=aiida_kkr --ignore=jukkr -k Test_scf_workflow $addopt
else
echo "skipping kkr_scf workflow test"
fi
- if [[ ! -z "$RUN_VORONOI" ]] && [[ ! -z "$RUN_KKRHOST" ]] && [[ ! -z "$NO_RMQ" ]]; then
+ if [[ ! -z "$RUN_VORONOI" ]] && [[ ! -z "$RUN_KKRHOST" ]] && [[ -z "$NO_RMQ" ]]; then
echo "run kkr_eos workflow test"
pytest --cov-report=term-missing --cov-append --cov=aiida_kkr --ignore=jukkr -k Test_eos_workflow $addopt
else
@@ -121,19 +121,19 @@ else
# tests using kkrimp (and kkrhost/voronoi)
- if [[ ! -z "$RUN_KKRIMP" ]] && [[ ! -z "$NO_RMQ" ]]; then
+ if [[ ! -z "$RUN_KKRIMP" ]] && [[ -z "$NO_RMQ" ]]; then
echo "run kkrimp_scf workflow test"
pytest --cov-report=term-missing --cov-append --cov=aiida_kkr --ignore=jukkr -k Test_kkrimp_scf_workflow $addopt
else
echo "skipping kkrimp_scf workflow test"
fi
- if [[ ! -z "$RUN_KKRIMP" ]] && [[ ! -z "$RUN_KKRHOST" ]] && [[ ! -z "$RUN_VORONOI" ]] && [[ ! -z "$NO_RMQ" ]]; then
+ if [[ ! -z "$RUN_KKRIMP" ]] && [[ ! -z "$RUN_KKRHOST" ]] && [[ ! -z "$RUN_VORONOI" ]] && [[ -z "$NO_RMQ" ]]; then
echo "run kkrimp_full workflow test"
pytest --cov-report=term-missing --cov-append --cov=aiida_kkr --ignore=jukkr -k Test_kkrimp_full_workflow $addopt
else
echo "skipping kkrimp_full workflow test"
fi
- if [[ ! -z "$RUN_KKRIMP" ]] && [[ ! -z "$RUN_KKRHOST" ]] && [[ ! -z "$NO_RMQ" ]]; then
+ if [[ ! -z "$RUN_KKRIMP" ]] && [[ ! -z "$RUN_KKRHOST" ]] && [[ -z "$NO_RMQ" ]]; then
echo "run kkrimp_dos workflow test"
pytest --cov-report=term-missing --cov-append --cov=aiida_kkr --ignore=jukkr -k Test_kkrimp_dos_workflow $addopt
else
From 8fa4912abc1516d62c4e05952c4f2ae9839a6093 Mon Sep 17 00:00:00 2001
From: ruess
Date: Mon, 18 Nov 2019 15:17:21 +0100
Subject: [PATCH 3/9] Fix kkrimp mode to reuse uploaded kkrflex_green and tmat
files
---
aiida_kkr/calculations/kkrimp.py | 12 +++++++++---
1 file changed, 9 insertions(+), 3 deletions(-)
diff --git a/aiida_kkr/calculations/kkrimp.py b/aiida_kkr/calculations/kkrimp.py
index d54639e5..c7bfe43c 100644
--- a/aiida_kkr/calculations/kkrimp.py
+++ b/aiida_kkr/calculations/kkrimp.py
@@ -31,7 +31,7 @@
__copyright__ = (u"Copyright (c), 2018, Forschungszentrum Jülich GmbH, "
"IAS-1/PGI-1, Germany. All rights reserved.")
__license__ = "MIT license, see LICENSE.txt file"
-__version__ = "0.5.0"
+__version__ = "0.5.1"
__contributors__ = (u"Philipp Rüßmann", u"Fabian Bertoldo")
#TODO: implement 'ilayer_center' consistency check
@@ -693,7 +693,8 @@ def get_remote_symlink(self, local_copy_list):
with comp.get_transport() as connection:
# do this for GMAT
uuid_GF = GF_local_copy_info[0]
- GF_remote_path = os.path.join(GFpath_remote, uuid_GF, GF_local_copy_info[1])
+ filename = GF_local_copy_info[1]
+ GF_remote_path = os.path.join(GFpath_remote, uuid_GF, filename)
# check if file exists on remote
if connection.isfile(GF_remote_path):
# remove GF from local copy list and add to remote symlink list
@@ -702,12 +703,17 @@ def get_remote_symlink(self, local_copy_list):
# do the same for TMAT
uuid_TM = TM_local_copy_info[0]
- TM_remote_path = os.path.join(GFpath_remote, uuid_TM, TM_local_copy_info[1])
+ filename = TM_local_copy_info[1]
+ TM_remote_path = os.path.join(GFpath_remote, uuid_TM, filename)
# check if file exists on remote
if connection.isfile(TM_remote_path):
# remove TMAT from local copy list and add to remote symlink list
local_copy_list.remove(TM_local_copy_info)
remote_symlink_list.append((comp.uuid, TM_remote_path, filename))
+ # print symlink and local copy list (for debugging purposes)
+ print('local_copy_list: {}'.format(local_copy_list))
+ print('symlink_list: {}'.format(remote_symlink_list))
+
# now return updated remote_symlink and local_copy lists
return remote_symlink_list, local_copy_list
From 34688c1bf9b3ce59d9b6c617b433cdb7f645373f Mon Sep 17 00:00:00 2001
From: ruess
Date: Wed, 20 Nov 2019 15:54:13 +0100
Subject: [PATCH 4/9] Implement cleanup for successful intermediate imp
calculations in kkr_imp_scf workflow
---
aiida_kkr/workflows/kkr_imp_sub.py | 77 +++++++++++++++++++++++++++++-
1 file changed, 76 insertions(+), 1 deletion(-)
diff --git a/aiida_kkr/workflows/kkr_imp_sub.py b/aiida_kkr/workflows/kkr_imp_sub.py
index 2978dd5c..d1cf557e 100644
--- a/aiida_kkr/workflows/kkr_imp_sub.py
+++ b/aiida_kkr/workflows/kkr_imp_sub.py
@@ -20,7 +20,7 @@
__copyright__ = (u"Copyright (c), 2017, Forschungszentrum Jülich GmbH, "
"IAS-1/PGI-1, Germany. All rights reserved.")
__license__ = "MIT license, see LICENSE.txt file"
-__version__ = "0.7.4"
+__version__ = "0.7.5"
__contributors__ = (u"Fabian Bertoldo", u"Philipp Ruessmann")
#TODO: work on return results function
@@ -1007,6 +1007,11 @@ def return_results(self):
"""
self.report(message)
+
+ if self.ctx.successful:
+ self.report("INFO: clean output of intermediate calcs")
+ remove_out_pot_intermediate_impcalcs(self.ctx.successful, all_pks)
+
self.report("INFO: done with kkr_scf workflow!\n")
@@ -1014,3 +1019,73 @@ def error_handler(self):
"""Capture errors raised in validate_input"""
if self.ctx.exit_code is not None:
return self.ctx.exit_code
+
+
+def remove_out_pot_intermediate_impcalcs(successful, pks_all_calcs, dry_run=False):
+ """
+ Remove out_potential file from all but the last KKRimp calculation if workflow was successful
+ Usage:
+ imp_wf = load_node(266885) # maybe start with outer workflow
+ pk_imp_scf = imp_wf.outputs.workflow_info['used_subworkflows'].get('kkr_imp_sub')
+ imp_scf_wf = load_node(pk_imp_scf) # this is now the imp scf sub workflow
+ successful = imp_scf_wf.outputs.workflow_info['successful']
+ pks_all_calcs = imp_scf_wf.outputs.workflow_info['pks_all_calcs']
+ """
+ import tarfile, os
+ from aiida.orm import load_node
+ from aiida.common.folders import SandboxFolder
+ from aiida_kkr.calculations import KkrimpCalculation
+
+ if dry_run:
+ print('test', successful, len(pks_all_calcs))
+
+ # name of tarfile
+ tfname = KkrimpCalculation._FILENAME_TAR
+
+ # cleanup only if calculation was successful
+ if successful and len(pks_all_calcs)>1:
+ # remove out_potential for calculations, except for last successful one
+ pks_for_cleanup = pks_all_calcs[:-1]
+
+ # loop over all calculations
+ for pk in pks_for_cleanup:
+ if dry_run:
+ print('pk_for_cleanup:', pk)
+ # get retrieved folder of calc
+ calc = load_node(pk)
+ ret = calc.outputs.retrieved
+
+ # open tarfile if present
+ if tfname in ret.list_object_names():
+ delete_and_retar = False
+ with ret.open(tfname) as tf:
+ tf_abspath = tf.name
+
+ # create Sandbox folder which is used to temporarily extract output_all.tar.gz
+ tmpfolder = SandboxFolder()
+ tmpfolder_path = tmpfolder.abspath
+ with tarfile.open(tf_abspath) as tf:
+ tar_filenames = [ifile.name for ifile in tf.getmembers()]
+ # check if out_potential is in tarfile
+ if KkrimpCalculation._OUT_POTENTIAL in tar_filenames:
+ tf.extractall(tmpfolder_path)
+ delete_and_retar = True
+
+ if delete_and_retar and not dry_run:
+ # delete out_potential
+ os.remove(os.path.join(tmpfolder_path, KkrimpCalculation._OUT_POTENTIAL))
+ with tarfile.open(tf_abspath, 'w:gz') as tf:
+ # remove out_potential from list of files
+ tar_filenames = [i for i in tar_filenames if i!=KkrimpCalculation._OUT_POTENTIAL]
+ for f in tar_filenames:
+ # create new tarfile without out_potential file
+ fabs = os.path.join(tmpfolder_path, f)
+ tf.add(fabs, arcname=os.path.basename(fabs))
+ elif dry_run:
+ print('dry run:')
+ print('delete and retar?', delete_and_retar)
+ print('tmpfolder_path', tmpfolder_path)
+
+ # clean up temporary Sandbox folder
+ if not dry_run:
+ tmpfolder.erase()
From 284925c530b527d2a94fed706de1e35fc921c3d5 Mon Sep 17 00:00:00 2001
From: ruess
Date: Wed, 20 Nov 2019 16:07:54 +0100
Subject: [PATCH 5/9] Remove retrieved dos files after successful parsing in
kkr_imp_dos
---
aiida_kkr/workflows/kkr_imp_dos.py | 29 ++++++++++++++++++++++++++++-
1 file changed, 28 insertions(+), 1 deletion(-)
diff --git a/aiida_kkr/workflows/kkr_imp_dos.py b/aiida_kkr/workflows/kkr_imp_dos.py
index 6609dde0..207e404c 100644
--- a/aiida_kkr/workflows/kkr_imp_dos.py
+++ b/aiida_kkr/workflows/kkr_imp_dos.py
@@ -20,7 +20,7 @@
__copyright__ = (u"Copyright (c), 2019, Forschungszentrum Jülich GmbH, "
"IAS-1/PGI-1, Germany. All rights reserved.")
__license__ = "MIT license, see LICENSE.txt file"
-__version__ = "0.5.6"
+__version__ = "0.5.7"
__contributors__ = (u"Fabian Bertoldo", u"Philipp Ruessmann")
#TODO: improve workflow output node structure
@@ -62,6 +62,7 @@ class kkr_imp_dos_wc(WorkChain):
'use_mpi' : True} # execute KKR with mpi or without
_wf_default = {'ef_shift': 0. , # set custom absolute E_F (in eV)
+ 'clean_impcalc_retrieved': True, # remove output of KKRimp calculation after successful parsing of DOS files
}
# add defaults of dos_params since they are passed onto that workflow
@@ -185,6 +186,7 @@ def start(self):
# set workflow parameters for the KKR imputrity calculations
self.ctx.ef_shift = wf_dict.get('ef_shift', self._wf_default['ef_shift'])
self.ctx.dos_params_dict = wf_dict.get('dos_params', self._wf_default['dos_params'])
+ self.ctx.cleanup_impcalc_output = wf_dict.get('clean_impcalc_retrieved', self._wf_default['clean_impcalc_retrieved'])
# set workflow parameters for the KKR impurity calculation
self.ctx.nsteps = 1 # always only one step for DOS calculation
@@ -436,10 +438,16 @@ def return_results(self):
# interpol dos file and store to XyData nodes
dos_extracted, dosXyDatas = self.extract_dos_data(last_calc)
+ self.report('INFO: extracted DOS data? {}'.format(dos_extracted))
if dos_extracted:
self.out('dos_data', dosXyDatas['dos_data'])
self.out('dos_data_interpol', dosXyDatas['dos_data_interpol'])
+ # maybe cleanup retrieved folder of DOS calculation
+ if self.ctx.cleanup_impcalc_output:
+ self.report('INFO: cleanup after storing of DOS data')
+ pk_impcalc = self.ctx.kkrimp_dos.outputs.workflow_info['pks_all_calcs'][0]
+ cleanup_kkrimp_retrieved(pk_impcalc)
self.report('INFO: workflow_info node: {}'.format(outputnode_t.uuid))
@@ -609,3 +617,22 @@ def parse_impdosfiles(dos_abspath, natom, nspin, ef):
output = {'dos_data': dosnode, 'dos_data_interpol': dosnode2}
return output
+
+
+def cleanup_kkrimp_retrieved(pk_impcalc):
+ """
+ remove output_all.tar.gz from retrieved of impurity calculation identified by pk_impcalc
+ """
+ from aiida.orm import load_node
+ from aiida_kkr.calculations import KkrimpCalculation
+
+ # extract retrieved folder
+ doscalc = load_node(pk_impcalc)
+ ret = doscalc.outputs.retrieved
+
+ # name of tarfile
+ tfname = KkrimpCalculation._FILENAME_TAR
+
+ # remove tarfile from retrieved dir
+ if tfname in ret.list_object_names():
+ ret.delete_object(tfname, force=True)
From 1fa0bc2f4c3021f54519ac1b00766b0bd93e878a Mon Sep 17 00:00:00 2001
From: ruess
Date: Fri, 22 Nov 2019 10:48:07 +0100
Subject: [PATCH 6/9] Add cleanup of intermediate sfd files in kkrimp_sub
workflow
---
.../tests/workflows/test_kkrimp_dos_wc.py | 1 +
aiida_kkr/workflows/kkr_imp_sub.py | 38 +++++++++++++++++--
2 files changed, 36 insertions(+), 3 deletions(-)
diff --git a/aiida_kkr/tests/workflows/test_kkrimp_dos_wc.py b/aiida_kkr/tests/workflows/test_kkrimp_dos_wc.py
index 765149c2..1a7b6fa1 100755
--- a/aiida_kkr/tests/workflows/test_kkrimp_dos_wc.py
+++ b/aiida_kkr/tests/workflows/test_kkrimp_dos_wc.py
@@ -76,6 +76,7 @@ def test_dos_startpot_wc(self):
from aiida.engine import run
print(builder)
out = run(builder)
+ print(out)
assert 'last_calc_info' in out.keys()
assert 'last_calc_output_parameters' in out.keys()
diff --git a/aiida_kkr/workflows/kkr_imp_sub.py b/aiida_kkr/workflows/kkr_imp_sub.py
index d1cf557e..c18a37c9 100644
--- a/aiida_kkr/workflows/kkr_imp_sub.py
+++ b/aiida_kkr/workflows/kkr_imp_sub.py
@@ -20,7 +20,7 @@
__copyright__ = (u"Copyright (c), 2017, Forschungszentrum Jülich GmbH, "
"IAS-1/PGI-1, Germany. All rights reserved.")
__license__ = "MIT license, see LICENSE.txt file"
-__version__ = "0.7.5"
+__version__ = "0.7.6"
__contributors__ = (u"Fabian Bertoldo", u"Philipp Ruessmann")
#TODO: work on return results function
@@ -173,7 +173,7 @@ def define(cls, spec):
# Define the outputs of the workflow
spec.output('workflow_info', valid_type=Dict)
- spec.output('host_imp_pot', valid_type=SinglefileData)
+ spec.output('host_imp_pot', valid_type=SinglefileData, required=False)
def start(self):
@@ -200,6 +200,8 @@ def start(self):
self.ctx.last_remote = None
# link to previous host impurity potential
self.ctx.last_pot = None
+ # intermediate single file data objects that contain potential files which can be cleaned up at the end
+ self.ctx.sfd_pot_to_clean = []
# convergence info about rms etc. (used to determine convergence behavior)
self.ctx.last_rms_all = []
self.ctx.rms_all_steps = []
@@ -679,6 +681,7 @@ def run_kkrimp(self):
emin = GF_out_params.get_dict().get('energy_contour_group').get('emin')
# then use this value to get rid of all core states that are lower than emin (return the same input potential if no states have been removed
imp_pot = kick_out_corestates_wf(imp_pot, Float(emin))
+ self.ctx.sfd_pot_to_clean.append(imp_pot)
if 'impurity_info' in self.inputs:
self.report('INFO: using impurity_info node as input for kkrimp calculation')
imp_info = self.inputs.impurity_info
@@ -751,6 +754,7 @@ def inspect_kkrimp(self):
# take potfile directly from output
with retrieved_folder.open(KkrimpCalculation._OUT_POTENTIAL, 'rb') as pot_file:
self.ctx.last_pot = SinglefileData(file=pot_file)
+ self.ctx.sfd_pot_to_clean.append(self.ctx.last_pot)
except:
self.report("ERROR: no output potential found")
return self.exit_codes.ERROR_NO_OUTPUT_POT_FROM_LAST_CALC
@@ -980,7 +984,9 @@ def return_results(self):
outputnode_t.store()
self.out('workflow_info', outputnode_t)
- self.out('host_imp_pot', self.ctx.last_pot)
+ # store out_potential as SingleFileData only if this was no DOS run
+ if not self.ctx.dos_run:
+ self.out('host_imp_pot', self.ctx.last_pot)
# print results table for overview
# table layout:
@@ -1012,6 +1018,18 @@ def return_results(self):
self.report("INFO: clean output of intermediate calcs")
remove_out_pot_intermediate_impcalcs(self.ctx.successful, all_pks)
+ # clean intermediate single file data which are not needed after successful run or after DOS run
+ if self.ctx.successful or self.ctx.dos_run:
+ uuid_last_calc = self.ctx.last_pot.uuid
+ if not self.ctx.dos_run:
+ sfds_to_clean = [i for i in self.ctx.sfd_pot_to_clean if i.uuid!=uuid_last_calc]
+ else:
+ # in case of DOS run we can also clean the last output sfd file since this is never used
+ sfds_to_clean = self.ctx.sfd_pot_to_clean
+ # now clean all sfd files that are not needed anymore
+ for sfd_to_clean in sfds_to_clean:
+ clean_sfd(sfd_to_clean)
+
self.report("INFO: done with kkr_scf workflow!\n")
@@ -1089,3 +1107,17 @@ def remove_out_pot_intermediate_impcalcs(successful, pks_all_calcs, dry_run=Fals
# clean up temporary Sandbox folder
if not dry_run:
tmpfolder.erase()
+
+
+
+def clean_sfd(sfd_to_clean, nkeep=30):
+ with sfd_to_clean.open(sfd_to_clean.filename) as f:
+ txt = f.readlines()
+ # remove all lines after nkeep lines
+ txt2 = txt[:nkeep]
+ # add note to end of file
+ txt2+= [u'WARNING: REST OF FILE WAS CLEANED TO SAVE SPACE!!!\n']
+ # overwrite file
+ with sfd_to_clean.open(sfd_to_clean.filename, 'w') as fnew:
+ fnew.writelines(txt2)
+
From ff86157ea250ad70b13ab048c1f5c1e89bbdfb65 Mon Sep 17 00:00:00 2001
From: ruess
Date: Fri, 22 Nov 2019 14:01:43 +0100
Subject: [PATCH 7/9] Implement final cleanup for kkrimp full workflow
---
aiida_kkr/workflows/kkr_imp.py | 29 +++++++++++++++++++++++++++--
aiida_kkr/workflows/kkr_imp_sub.py | 21 ++++++++++++---------
2 files changed, 39 insertions(+), 11 deletions(-)
diff --git a/aiida_kkr/workflows/kkr_imp.py b/aiida_kkr/workflows/kkr_imp.py
index 8b459cec..33602d8e 100644
--- a/aiida_kkr/workflows/kkr_imp.py
+++ b/aiida_kkr/workflows/kkr_imp.py
@@ -14,13 +14,13 @@
from aiida_kkr.tools.common_workfunctions import test_and_get_codenode, neworder_potential_wf, update_params_wf
from aiida_kkr.workflows.gf_writeout import kkr_flex_wc
from aiida_kkr.workflows.voro_start import kkr_startpot_wc
-from aiida_kkr.workflows.kkr_imp_sub import kkr_imp_sub_wc
+from aiida_kkr.workflows.kkr_imp_sub import kkr_imp_sub_wc, clean_sfd
import numpy as np
__copyright__ = (u"Copyright (c), 2017, Forschungszentrum Jülich GmbH, "
"IAS-1/PGI-1, Germany. All rights reserved.")
__license__ = "MIT license, see LICENSE.txt file"
-__version__ = "0.6.7"
+__version__ = "0.6.8"
__contributors__ = (u"Fabian Bertoldo", u"Philipp Ruessmann")
#TODO: generalize workflow to multiple impurities
#TODO: add additional checks for the input
@@ -250,6 +250,9 @@ def start(self):
'mag_init': self.ctx.mag_init, 'hfield': self.ctx.hfield, 'init_pos': self.ctx.init_pos,
'accuracy_params': self.ctx.accuracy_params})
+ # list of things that are cleaned if everything ran through
+ self.ctx.sfd_final_cleanup = []
+
# report the chosen parameters to the user
self.report('INFO: use the following parameter:\n'
@@ -538,6 +541,8 @@ def construct_startpot(self):
# add starting potential for kkrimp calculation to context
self.ctx.startpot_kkrimp = startpot_kkrimp
+ # add to list for final cleanup
+ self.ctx.sfd_final_cleanup.append(startpot_kkrimp)
self.report('INFO: created startpotential (pid: {}) for the impurity calculation '
'by using information of the GF host calculation (pid: {}), the potential of the '
@@ -628,7 +633,11 @@ def return_results(self):
self.out('workflow_info', outputnode_t)
self.out('last_calc_output_parameters', last_calc_output_params)
self.out('last_calc_info', last_calc_info)
+
+ # cleanup things that are not needed anymore
+ self.final_cleanup()
+ # print final message before exiting
self.report('INFO: created 3 output nodes for the KKR impurity workflow.')
self.report('\n'
'|------------------------------------------------------------------------------------------------------------------|\n'
@@ -637,6 +646,22 @@ def return_results(self):
else:
self.report(self.exit_codes.ERROR_KKRIMP_SUB_WORKFLOW_FAILURE)
return self.exit_codes.ERROR_KKRIMP_SUB_WORKFLOW_FAILURE
+
+ def final_cleanup(self):
+ """
+ Remove unneeded files to save space
+ """
+ for sfd in self.ctx.sfd_final_cleanup:
+ clean_sfd(sfd)
+ if self.ctx.create_startpot:
+ kkr_startpot = self.ctx.last_voro_calc
+ vorocalc = kkr_startpot.outputs.last_voronoi_remote.get_incoming(link_label_filter=u'remote_folder').first().node
+ ret = vorocalc.outputs.retrieved
+ for fname in ret.list_object_names():
+ if fname!=VoronoiCalculation._OUTPUT_FILE_NAME:
+ # delete all except voronoi default output file
+ with ret.open(fname) as f:
+ ret.delete_object(fname, force=True)
@calcfunction
diff --git a/aiida_kkr/workflows/kkr_imp_sub.py b/aiida_kkr/workflows/kkr_imp_sub.py
index c18a37c9..4dd298b2 100644
--- a/aiida_kkr/workflows/kkr_imp_sub.py
+++ b/aiida_kkr/workflows/kkr_imp_sub.py
@@ -1020,15 +1020,7 @@ def return_results(self):
# clean intermediate single file data which are not needed after successful run or after DOS run
if self.ctx.successful or self.ctx.dos_run:
- uuid_last_calc = self.ctx.last_pot.uuid
- if not self.ctx.dos_run:
- sfds_to_clean = [i for i in self.ctx.sfd_pot_to_clean if i.uuid!=uuid_last_calc]
- else:
- # in case of DOS run we can also clean the last output sfd file since this is never used
- sfds_to_clean = self.ctx.sfd_pot_to_clean
- # now clean all sfd files that are not needed anymore
- for sfd_to_clean in sfds_to_clean:
- clean_sfd(sfd_to_clean)
+ self.final_cleanup()
self.report("INFO: done with kkr_scf workflow!\n")
@@ -1038,6 +1030,17 @@ def error_handler(self):
if self.ctx.exit_code is not None:
return self.ctx.exit_code
+ def final_cleanup(self):
+ uuid_last_calc = self.ctx.last_pot.uuid
+ if not self.ctx.dos_run:
+ sfds_to_clean = [i for i in self.ctx.sfd_pot_to_clean if i.uuid!=uuid_last_calc]
+ else:
+ # in case of DOS run we can also clean the last output sfd file since this is never used
+ sfds_to_clean = self.ctx.sfd_pot_to_clean
+ # now clean all sfd files that are not needed anymore
+ for sfd_to_clean in sfds_to_clean:
+ clean_sfd(sfd_to_clean)
+
def remove_out_pot_intermediate_impcalcs(successful, pks_all_calcs, dry_run=False):
"""
From 680a50e8dfe6e8ca2bf0214f513e287140fb9875 Mon Sep 17 00:00:00 2001
From: ruess
Date: Mon, 25 Nov 2019 13:53:18 +0100
Subject: [PATCH 8/9] Implement cleaning of raw_input directory for
KkrimpCalculation after successful kkr_imp_sub workflow
---
aiida_kkr/workflows/kkr_imp_sub.py | 39 +++++++++++++++++++++++++-----
1 file changed, 33 insertions(+), 6 deletions(-)
diff --git a/aiida_kkr/workflows/kkr_imp_sub.py b/aiida_kkr/workflows/kkr_imp_sub.py
index 4dd298b2..3faaa474 100644
--- a/aiida_kkr/workflows/kkr_imp_sub.py
+++ b/aiida_kkr/workflows/kkr_imp_sub.py
@@ -20,7 +20,7 @@
__copyright__ = (u"Copyright (c), 2017, Forschungszentrum Jülich GmbH, "
"IAS-1/PGI-1, Germany. All rights reserved.")
__license__ = "MIT license, see LICENSE.txt file"
-__version__ = "0.7.6"
+__version__ = "0.7.7"
__contributors__ = (u"Fabian Bertoldo", u"Philipp Ruessmann")
#TODO: work on return results function
@@ -1015,8 +1015,10 @@ def return_results(self):
if self.ctx.successful:
- self.report("INFO: clean output of intermediate calcs")
- remove_out_pot_intermediate_impcalcs(self.ctx.successful, all_pks)
+ self.report("INFO: clean output of calcs")
+ remove_out_pot_impcalcs(self.ctx.successful, all_pks)
+ self.report("INFO: clean up raw_input folders")
+ clean_raw_input(self.ctx.successful, all_pks)
# clean intermediate single file data which are not needed after successful run or after DOS run
if self.ctx.successful or self.ctx.dos_run:
@@ -1042,7 +1044,7 @@ def final_cleanup(self):
clean_sfd(sfd_to_clean)
-def remove_out_pot_intermediate_impcalcs(successful, pks_all_calcs, dry_run=False):
+def remove_out_pot_impcalcs(successful, pks_all_calcs, dry_run=False):
"""
Remove out_potential file from all but the last KKRimp calculation if workflow was successful
Usage:
@@ -1065,8 +1067,9 @@ def remove_out_pot_intermediate_impcalcs(successful, pks_all_calcs, dry_run=Fals
# cleanup only if calculation was successful
if successful and len(pks_all_calcs)>1:
- # remove out_potential for calculations, except for last successful one
- pks_for_cleanup = pks_all_calcs[:-1]
+ # remove out_potential for calculations
+        # note that the last calc can also be cleaned since the output potential is stored in a single file data node
+ pks_for_cleanup = pks_all_calcs[:]
# loop over all calculations
for pk in pks_for_cleanup:
@@ -1111,6 +1114,30 @@ def remove_out_pot_intermediate_impcalcs(successful, pks_all_calcs, dry_run=Fals
if not dry_run:
tmpfolder.erase()
+def clean_raw_input(successful, pks_calcs, dry_run=False):
+ """
+ Clean raw_input directories that contain copies of shapefun and potential files
+ This however breaks provenance (strictly speaking) and therefore should only be done
+ for the calculations of a successfully finished workflow (see email on mailing list from 25.11.2019).
+ """
+ from aiida.orm import load_node
+ from aiida_kkr.calculations import KkrimpCalculation
+ if successful:
+ for pk in pks_calcs:
+ node = load_node(pk)
+ # clean only nodes that are KkrimpCalculations
+ if node.process_class==KkrimpCalculation:
+ raw_input_folder = node._raw_input_folder
+ # clean potential and shapefun files
+ for filename in [KkrimpCalculation._POTENTIAL, KkrimpCalculation._SHAPEFUN]:
+ if filename in raw_input_folder.get_content_list():
+ if dry_run:
+ print('clean {}'.format(filename))
+ else:
+ raw_input_folder.remove_path(filename)
+ elif dry_run:
+ print('no raw_inputs to clean')
+
def clean_sfd(sfd_to_clean, nkeep=30):
From e1b20a83cc525a0e4539b8e176c6421b7b0cbe7e Mon Sep 17 00:00:00 2001
From: ruess
Date: Mon, 27 Jan 2020 11:30:24 +0100
Subject: [PATCH 9/9] =?UTF-8?q?Add=20plotting=20functionality=20for=20imp?=
=?UTF-8?q?=20workflows=20Bump=20version:=201.1.9-dev=20=E2=86=92=201.1.10?=
=?UTF-8?q?-dev?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.bumpversion.cfg | 2 +-
aiida_kkr/__init__.py | 2 +-
aiida_kkr/tests/run_all.sh | 12 +++-
aiida_kkr/tools/plot_kkr.py | 129 ++++++++++++++++++++++++++++++++----
pyproject.toml | 2 +-
setup.json | 2 +-
6 files changed, 131 insertions(+), 18 deletions(-)
diff --git a/.bumpversion.cfg b/.bumpversion.cfg
index cada2c00..decc3527 100644
--- a/.bumpversion.cfg
+++ b/.bumpversion.cfg
@@ -1,5 +1,5 @@
[bumpversion]
-current_version = 1.1.9-dev
+current_version = 1.1.10-dev
commit = True
tag = True
parse = (?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+)(\-(?P<release>[a-z]+))?
diff --git a/aiida_kkr/__init__.py b/aiida_kkr/__init__.py
index 04055ca6..3215ec77 100644
--- a/aiida_kkr/__init__.py
+++ b/aiida_kkr/__init__.py
@@ -2,4 +2,4 @@
AiiDA KKR
"""
-__version__ = "1.1.9-dev"
+__version__ = "1.1.10-dev"
diff --git a/aiida_kkr/tests/run_all.sh b/aiida_kkr/tests/run_all.sh
index 7f2025ac..8fa38879 100755
--- a/aiida_kkr/tests/run_all.sh
+++ b/aiida_kkr/tests/run_all.sh
@@ -11,7 +11,17 @@ mkdir -p '.aiida';
# if the environment variable is unset or the empty string then the corresponding tests will be ignored
usage(){
- echo "$0 usage:" && grep " .)\ #" $0; exit 0;
+ echo "$0 usage:" && grep " .)\ #" $0;
+ echo
+ echo "Default behavior is to run only tests that do not require running an actual calculation (i.e. skip workflow tests)."
+ echo "Additional settings with the environment variables (set to something or unset to remove setting):";
+ echo " 'RUN_ALL': run all tests";
+ echo " 'SKIP_NOWORK': skip workflow tests";
+ echo " 'RUN_VORONOI': run voronoi tests";
+ echo " 'RUN_KKRHOST': run kkrhost tests";
+ echo " 'RUN_KKRIMP': run kkrimp tests";
+ echo
+ exit 0;
}
addopt=""
diff --git a/aiida_kkr/tools/plot_kkr.py b/aiida_kkr/tools/plot_kkr.py
index 553d4121..db1fcebd 100644
--- a/aiida_kkr/tools/plot_kkr.py
+++ b/aiida_kkr/tools/plot_kkr.py
@@ -10,7 +10,7 @@
__copyright__ = (u"Copyright (c), 2018, Forschungszentrum Jülich GmbH, "
"IAS-1/PGI-1, Germany. All rights reserved.")
__license__ = "MIT license, see LICENSE.txt file"
-__version__ = "0.4.10"
+__version__ = "0.5.0"
__contributors__ = ("Philipp Rüßmann")
@@ -352,7 +352,8 @@ def dosplot(self, d, natoms, nofig, all_atoms, l_channels, sum_spins, switch_xy,
n0 = len(show_atoms)
if n0==0: n0=natoms
#pcycle_values = array([j for i in range(n0) for j in pcycle_values]).reshape(-1)
- pcycle_values = list(pcycle_values[:n0]) + list(pcycle_values[:n0])
+ #pcycle_values = list(pcycle_values[:n0]) + list(pcycle_values[:n0])
+ pcycle_values = array([[i,i] for i in pcycle_values]).reshape(-1)
else:
pcycle_values = array([[i,i] for i in pcycle_values]).reshape(-1)
pcycle_default = cycler('color', pcycle_values)
@@ -424,13 +425,17 @@ def dosplot(self, d, natoms, nofig, all_atoms, l_channels, sum_spins, switch_xy,
legend(fontsize='x-small')
- def rmsplot(self, rms, neutr, nofig, ptitle, logscale, only=None, **kwargs):
+ def rmsplot(self, rms, neutr, nofig, ptitle, logscale, only=None, rename_second=None, **kwargs):
"""plot rms and charge neutrality"""
from numpy import array
from matplotlib.pylab import figure, plot, twinx, xlabel, ylabel, legend, subplots_adjust, title, gca
if not nofig: figure()
+        # allow overwriting the name of the second quantity, plotted on the second y axis
+ name_second_y = 'charge_neutrality'
+ if rename_second is not None:
+ name_second_y = rename_second
if only is None:
if 'label' not in list(kwargs.keys()):
@@ -444,12 +449,12 @@ def rmsplot(self, rms, neutr, nofig, ptitle, logscale, only=None, **kwargs):
twinx()
if logscale: neutr = abs(array(neutr))
if 'label' not in list(kwargs.keys()):
- label='charge neutrality'
+ label=name_second_y
else:
label=kwargs.pop('label')
plot(neutr,'-or', label=label)
ax2 = gca()
- ylabel('charge neutrality', color='r')
+ ylabel(name_second_y, color='r')
if logscale:
ax1.set_yscale('log')
ax2.set_yscale('log')
@@ -464,7 +469,7 @@ def rmsplot(self, rms, neutr, nofig, ptitle, logscale, only=None, **kwargs):
elif only=='neutr':
if logscale: neutr = abs(array(neutr))
plot(neutr, **kwargs)
- ylabel('neutr')
+ ylabel(name_second_y)
xlabel('iteration')
ax1 = gca()
if logscale:
@@ -675,20 +680,117 @@ def plot_voro_calc(self, node, **kwargs):
# TODO maybe plot some output of voronoi
- def plot_kkrimp_calc(self, node, **kwargs):
+ def plot_kkrimp_calc(self, node, return_rms=False, return_stot=False, **kwargs):
"""plot things from a kkrimp Calculation node"""
- print("Plotting not implemented yet")
- pass
+ from numpy import array, ndarray
+ from numpy import sqrt, sum
+
+ # read data from output node
+ rms_goal, rms = None, []
+ if node.is_finished_ok:
+ out_para = node.outputs.output_parameters
+ out_para_dict = out_para.get_dict()
+ out_para_dict['convergence_group']['rms_all_iterations']
+ rms = out_para_dict['convergence_group']['rms_all_iterations']
+ rms_goal = out_para_dict['convergence_group']['qbound']
+ # extract total magnetic moment
+ nat = out_para_dict['number_of_atoms_in_unit_cell']
+ s = array(out_para_dict['convergence_group']['spin_moment_per_atom_all_iterations'], dtype=float)
+ ss = sqrt(sum(s**2, axis=1)).reshape(-1,nat)
+ stot = sum(ss, axis=1)
+
+ # now return values
+ return_any, return_list = False, []
+ if return_rms:
+ return_list += [rms, rms_goal]
+ return_any = True
+ if return_stot:
+ return_list += [stot]
+ return_any = True
+ if return_any:
+ return return_list
+
def plot_kkrimp_wc(self, node, **kwargs):
"""plot things from a kkrimp_wc workflow"""
- print("Plotting not implemented yet")
- pass
+
+ # call imp_sub plotting from here
+ from aiida_kkr.workflows import kkr_imp_sub_wc
+ sub_wf = [i.node for i in node.get_outgoing(node_class=kkr_imp_sub_wc).all()][0]
+ self.plot_kkrimp_sub_wc(sub_wf)
+
def plot_kkrimp_sub_wc(self, node, **kwargs):
"""plot things from a kkrimp_sub_wc workflow"""
- print("Plotting not implemented yet")
- pass
+ from aiida_kkr.calculations import KkrimpCalculation
+ from numpy import array
+ from matplotlib.pyplot import figure, subplot, axhline, axvline, gca, ylim
+
+ # extract rms from calculations
+ impcalcs = [i.node for i in node.get_outgoing(node_class=KkrimpCalculation).all()]
+ rms_all, pks_all, stot_all = [], [], []
+ rms_goal = None
+ for impcalc in impcalcs:
+ pks_all.append(impcalc.pk)
+ rms_tmp, rms_goal_tmp, stot_tmp = self.plot_kkrimp_calc(impcalc, return_rms=True, return_stot=True)
+ rms_all.append(rms_tmp)
+ if rms_goal_tmp is not None:
+ if rms_goal is not None:
+ rms_goal = min(rms_goal, rms_goal_tmp)
+ else:
+ rms_goal = rms_goal_tmp
+ stot_all.append(stot_tmp)
+
+ # extract options from kwargs
+ nofig = False
+ if 'nofig' in list(kwargs.keys()): nofig = kwargs.pop('nofig')
+ logscale = True
+ if 'logscale' in list(kwargs.keys()): logscale = kwargs.pop('logscale')
+ if 'subplot' in list(kwargs.keys()):
+ subplots = kwargs.pop('subplot')
+ else:
+ subplots = None
+ if 'label' in list(kwargs.keys()):
+ label = kwargs.pop('label')
+ else:
+ label = None
+ if 'ptitle' in list(kwargs.keys()):
+ ptitle = kwargs.pop('ptitle')
+ else:
+ ptitle = 'pk= {}'.format(node.pk)
+ if 'only' in list(kwargs.keys()):
+ only = kwargs.pop('only')
+ else:
+ only = None
+
+ # plotting of convergence properties (rms etc.)
+ if len(rms_all)>0:
+ # sort rms values and flatten array
+ reorder_rms = array(pks_all).argsort()
+ rms, niter_calcs, stot = [], [0], []
+ for i in array(rms_all)[reorder_rms]:
+ rms += list(i)
+ niter_calcs.append(len(i)-0.5)
+ for i in array(stot_all)[reorder_rms]:
+ stot += list(i)
+ # now plot
+ if len(rms)>0:
+ if not nofig:
+ figure()
+ if subplots is not None:
+ subplot(subplots[0], subplots[1], subplots[2])
+ if rms_goal is not None: axhline(rms_goal, color='grey', ls='--')
+ self.rmsplot(rms, stot, nofig=True, ptitle=ptitle, logscale=logscale, only=only, rename_second='sum(spinmom)', label=label)
+ # adapt y-limits to take care of showing spin-moment on sensible scale
+ if only is None:
+ yl = gca().get_ylim()
+ ylim(yl[0], max(yl[1], 0.1))
+ # add lines that indicate different calculations
+ tmpsum = 1
+ if not nofig and len(niter_calcs)>1:
+ for i in niter_calcs:
+ tmpsum+=i
+ axvline(tmpsum-1, color='k', ls=':')
def plot_kkrimp_dos_wc(self, node, **kwargs):
@@ -730,6 +832,7 @@ def plot_kkrimp_dos_wc(self, node, **kwargs):
if calcnode.is_finished_ok:
natoms = len(calcnode.outputs.output_parameters.get_dict().get('charge_core_states_per_atom'))
self.dosplot(d, natoms, nofig, all_atoms, l_channels, sum_spins, switch_xy, switch_sign_spin2, yscale=yscale, **kwargs)
+ title('pk= {}'.format(node.pk))
### workflows ###
diff --git a/pyproject.toml b/pyproject.toml
index 444d2a34..d07dc33c 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -3,7 +3,7 @@ requires = ["setuptools", "wheel"]
[tool.poetry]
name = "aiida-kkr"
-version = "1.1.9-dev"
+version = "1.1.10-dev"
description = "AiiDA plugin for the KKR code"
authors = ["Philipp Rüssmann ",
"Jens Bröder ",
diff --git a/setup.json b/setup.json
index fe24b70c..850f299d 100644
--- a/setup.json
+++ b/setup.json
@@ -16,7 +16,7 @@
"Natural Language :: English",
"Framework :: AiiDA"
],
- "version": "1.1.9-dev",
+ "version": "1.1.10-dev",
"reentry_register": true,
"install_requires": [
"aiida-core >= 1.0.0b6,<2.0.0",