diff --git a/CHANGELOG.rst b/CHANGELOG.rst
index f3da7a544..7d9cd2a4a 100644
--- a/CHANGELOG.rst
+++ b/CHANGELOG.rst
@@ -31,6 +31,33 @@ Change Log
- [???] "asynch" multienv
- [???] properly model interconnecting powerlines
+[1.9.2] - 2023-07-26
+---------------------
+- [BREAKING] rename all the files in the "`Backend`", "`Action`" and "`Environment`" modules so that
+  their filenames start with a lowercase letter. This is consistent with python practice and also makes
+  it possible to distinguish between the file in the module and the class imported. This should have
+  little to no impact on existing code; to "upgrade", instead of
+  `from grid2op.Action.BaseAction import BaseAction` (which you should not have done in the first place)
+  just do `from grid2op.Action import BaseAction`. Expect similar changes for other grid2op modules
+  in the near future.
+- [FIXED] broken environment "l2rpn_idf_2023" (with test=True) due to the presence of a `__pycache__` folder
+- [FIXED] the time series `MultiFolder` will now ignore `__pycache__` folders
+- [FIXED] an issue with compatibility with previous versions (due to the alert feature)
+- [FIXED] an issue with the `_ObsEnv` when using reward that could not be used in forecast (`self.is_simulated_env()`
+ was not working as expected due to a wrong init of the reward in `_ObsEnv`)
+- [FIXED] an issue when disconnecting loads / generators / storage units and changing their values in the same
+  action: the behaviour could depend on the backend. As of 1.9.2 the "disconnections" have the priority (if
+  an action disconnects an element, it will not change its setpoint at the same time).
+- [FIXED] a bug in `AlertReward` due to `reset` not being called.
+- [FIXED] issue https://github.com/rte-france/Grid2Op/issues/494
+- [ADDED] the score function used for the L2RPN 2023 competition (Paris Area)
+- [IMPROVED] overall performance by calling `arr.sum()` or `arr.any()` instead of `np.sum(arr)` or
+  `np.any(arr)`, see https://numpy.org/neps/nep-0018-array-function-protocol.html#performance
+- [IMPROVED] overall performance of the `obs.simulate` function by improving the speed of copying `_BackendAction`
+- [IMPROVED] overall performance of `env.step` / `obs.simulate` by preventing unnecessary observation deep copy
+- [IMPROVED] overall performance of `env.step` / `obs.simulate` by switching to `copy.deepcopy(obs)` instead of
+ `obs.copy()`
+
[1.9.1] - 2023-07-06
--------------------
- [BREAKING] (slightly): default `gym_compat` module now inherit from `gymnasium` (if
diff --git a/MANIFEST.in b/MANIFEST.in
index 6da49fdbe..25337d7a1 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1 +1,5 @@
-recursive-include grid2op/data *.bz2 *.json *.zip prods_charac.csv *.py .multimix storage_units_charac.csv start_datetime.info time_interval.info
\ No newline at end of file
+recursive-include grid2op/data *.bz2 *.json *.zip prods_charac.csv *.py .multimix storage_units_charac.csv start_datetime.info time_interval.info
+global-exclude */__pycache__/*
+global-exclude *.pyc
+global-exclude grid2op/data_test/*
+global-exclude grid2op/tests/*
diff --git a/_profiling/profiler_gym_compat.py b/_profiling/profiler_gym_compat.py
new file mode 100644
index 000000000..31c0c96e8
--- /dev/null
+++ b/_profiling/profiler_gym_compat.py
@@ -0,0 +1,81 @@
+# Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
+# See AUTHORS.txt
+# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
+# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
+# you can obtain one at http://mozilla.org/MPL/2.0/.
+# SPDX-License-Identifier: MPL-2.0
+# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
+
+
+"""
+This file aims at profiling a case where the "simulate" function is heavily used.
+"""
+
+import grid2op
+from grid2op.gym_compat import GymEnv
+
+import warnings
+try:
+ from lightsim2grid import LightSimBackend
+ bk_cls = LightSimBackend
+ nm_bk_used = "LightSimBackend"
+ print("LightSimBackend used")
+except ImportError:
+ from grid2op.Backend import PandaPowerBackend
+ bk_cls = PandaPowerBackend
+ nm_bk_used = "PandaPowerBackend"
+ print("PandaPowerBackend used")
+
+import os
+import cProfile
+import pdb
+
+
+NB_SIMULATE = 10
+ENV_NAME = "l2rpn_icaps_2021_small"
+ENV_NAME = "l2rpn_idf_2023"
+
+
+def make_env(env_name=ENV_NAME):
+ with warnings.catch_warnings():
+ warnings.filterwarnings("ignore")
+ fake_env = grid2op.make(env_name, test=True)
+ param = fake_env.parameters
+ param.NO_OVERFLOW_DISCONNECTION = True
+ env = grid2op.make(env_name, backend=bk_cls(), param=param)
+ env.seed(0)
+ env.reset()
+ gym_env = GymEnv(env)
+ return gym_env, env
+
+
+def run_env(gym_env, cp_gym_env, env, cp_env):
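+    # first pass: play one episode through the gym wrapper (an empty dict is a "do nothing" action),
+    # enabling the profiler only around the `step` calls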
+ done = False
+ while not done:
+ act = {}
+ cp_gym_env.enable()
+ obs, reward, done, truncated, info = gym_env.step(act)
+ cp_gym_env.disable()
+
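+    # second pass: play one episode with the raw grid2op environment ("do nothing" action)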
+ done = False
+ while not done:
+ act = env.action_space()
+ cp_env.enable()
+ obs, reward, done, info = env.step(act)
+ cp_env.disable()
+
+
+if __name__ == "__main__":
+ gym_env, env = make_env()
+ cp_gym = cProfile.Profile()
+ cp_env = cProfile.Profile()
+ run_env(gym_env, cp_gym, env, cp_env)
+ nm_f, ext = os.path.splitext(__file__)
+ nm_out_gym = f"gym_{nm_f}_{nm_bk_used}_{ENV_NAME}_gymenv.prof"
+ nm_out_env = f"gym_{nm_f}_{nm_bk_used}_{ENV_NAME}_env.prof"
+ cp_gym.dump_stats(nm_out_gym)
+ cp_env.dump_stats(nm_out_env)
+ print("You can view profiling grid2op raw results with:\n\tsnakeviz {}".format(nm_out_env))
+ print("You can view profiling gym results with:\n\tsnakeviz {}".format(nm_out_gym))
+# baseline: 66.7 s
+# without the copy in simulate: 65.2 s
diff --git a/_profiling/profiler_simulate.py b/_profiling/profiler_simulate.py
index cae2c3292..bb60b3dad 100644
--- a/_profiling/profiler_simulate.py
+++ b/_profiling/profiler_simulate.py
@@ -29,37 +29,61 @@
import pdb
-def make_env():
- env_name = "l2rpn_icaps_2021"
+NB_SIMULATE = 10
+ENV_NAME = "l2rpn_icaps_2021_small"
+ENV_NAME = "l2rpn_idf_2023"
+
+
+def make_env(env_name=ENV_NAME):
with warnings.catch_warnings():
warnings.filterwarnings("ignore")
fake_env = grid2op.make(env_name, test=True)
param = fake_env.parameters
param.NO_OVERFLOW_DISCONNECTION = True
- env = grid2op.make(env_name+"_small", backend=LightSimBackend(), param=param)
+ env = grid2op.make(env_name, backend=bk_cls(), param=param)
+ env.seed(0)
+ env.reset()
return env
-def run_env(env):
+def run_env(env, cp_env, cp_simu):
done = False
while not done:
act = env.action_space()
+ cp_env.enable()
obs, reward, done, info = env.step(act)
+ cp_env.disable()
if not done:
- simulate(obs, env.action_space())
+ simulate(obs, env.action_space, NB_SIMULATE, cp_simu)
-def simulate(obs, act):
- simobs, rim_r, sim_d, sim_info = obs.simulate(act)
+def simulate(obs, action_space, nb_simu=NB_SIMULATE, cp=None):
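+    # sample `nb_simu` random actions; only the `obs.simulate` calls are profiled (via `cp`)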
+ acts = [action_space.sample() for _ in range(nb_simu)]
+ # acts = [action_space() for _ in range(nb_simu)]
+    tmp = sum(acts, start=action_space())
+ try:
+ if cp is not None:
+ cp.enable()
+ for i in range(nb_simu):
+ simobs, rim_r, sim_d, sim_info = obs.simulate(acts[i])
+ prev_act = acts[i]
+ if cp is not None:
+ cp.disable()
+ except RuntimeError as exc_:
+ raise exc_
if __name__ == "__main__":
env = make_env()
- cp = cProfile.Profile()
- cp.enable()
- run_env(env)
- cp.disable()
+ cp_simu = cProfile.Profile()
+ cp_env = cProfile.Profile()
+ run_env(env, cp_env, cp_simu)
nm_f, ext = os.path.splitext(__file__)
- nm_out = f"{nm_f}_{nm_bk_used}.prof"
- cp.dump_stats(nm_out)
- print("You can view profiling results with:\n\tsnakeviz {}".format(nm_out))
+ nm_out_simu = f"{nm_f}_{nm_bk_used}_{ENV_NAME}_{NB_SIMULATE}_simu.prof"
+ nm_out_env = f"{nm_f}_{nm_bk_used}_{ENV_NAME}_{NB_SIMULATE}_env.prof"
+ cp_simu.dump_stats(nm_out_simu)
+ cp_env.dump_stats(nm_out_env)
+ print("You can view profiling results with:\n\tsnakeviz {}".format(nm_out_env))
+ print("You can view profiling results with:\n\tsnakeviz {}".format(nm_out_simu))
+# baseline: 66.7 s
+# without the copy in simulate: 65.2 s
diff --git a/docs/available_envs.rst b/docs/available_envs.rst
index 29ecf8ef6..f2c8da22c 100644
--- a/docs/available_envs.rst
+++ b/docs/available_envs.rst
@@ -4,6 +4,10 @@
.. |l2rpn_neurips_2020_track1_layout| image:: ./img/l2rpn_neurips_2020_track1_layout.png
.. |l2rpn_neurips_2020_track2_layout| image:: ./img/l2rpn_neurips_2020_track2_layout.png
.. |l2rpn_wcci_2022_layout| image:: ./img/l2rpn_wcci_2022_layout.png
+.. |l2rpn_idf_2023_layout| image:: ./img/l2rpn_idf_2023_layout.png
+.. |l2rpn_idf_2023_areas| image:: ./img/l2rpn_idf_2023_areas.png
+.. |l2rpn_idf_2023_maint| image:: ./img/l2rpn_idf_2023_maint.png
+.. |l2rpn_idf_2023_att| image:: ./img/l2rpn_idf_2023_att.png
Available environments
@@ -55,6 +59,7 @@ env name grid size maintenance opponent redis
:ref:`l2rpn_neurips_2020_track2` 118 sub. ✔️ ️ ❌ ️ ✔️ ️ ❌
:ref:`l2rpn_icaps_2021` 36 sub. ✔️ ️ ✔️ ️ ✔️ ️ ❌
:ref:`l2rpn_wcci_2022` 118 sub. ✔️ ️ ✔️ ️ ✔️ ️ ✔️ ️
+:ref:`l2rpn_idf_2023` 118 sub. ✔️ ️ ✔️ ️ ✔️ ️ ✔️ ️
\* educ_case14_redisp \* 14 sub. ❌️ ❌ ️ ️ ✔️ ️ ❌
\* educ_case14_storage \* 14 sub. ❌️ ❌ ️ ✔️ ️ ✔️
\* rte_case5_example \* 5 sub. ❌️ ❌ ️ ️ ❌ ️ ️ ❌
@@ -139,6 +144,83 @@ This grid looks like:
|l2rpn_case14_sandbox_layout|
+.. _l2rpn_idf_2023:
+
+
+l2rpn_idf_2023
+++++++++++++++++
+
+This environment is also based on the 118-substation grid. The original grid has been modified (mainly the location
+of generators and loads) to accommodate the "possible energy mix" of France in 2035.
+
+It comes with 16 years' worth of data, each year being divided into 52 weeks, so 16 x 52 = 832 different scenarios,
+taking up around 5.4 GB of space.
+
+To create it you can:
+
+.. code-block:: python
+
+ import grid2op
+    env_name = "l2rpn_idf_2023"
+ env = grid2op.make(env_name)
+
+It counts 118 substations, 186 powerlines, 99 loads and 62 generators. It will be used for the L2RPN competitions funded by Region Ile De France:
+"Paris Region AI Challenge Energy Transition" and is free to use for everyone.
+
+You have the possibility, provided that you installed `chronix2grid` (with `pip install grid2op[chronix2grid]`), to generate as
+much data as you want with the :func:`grid2op.Environment.Environment.generate_data` function. See its documentation for more information.
+
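+A minimal sketch (the exact arguments of `generate_data` may differ, check its documentation; `nb_year` below is
+assumed to be the number of years of data to generate):
+
+.. code-block:: python
+
+    import grid2op
+
+    env = grid2op.make("l2rpn_idf_2023")
+    env.generate_data(nb_year=1)  # requires chronix2grid, can take a while
+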
+The environment layout looks like:
+
+|l2rpn_idf_2023_layout|
+
+Compared to previously available environments, there are some new features, including:
+
+- 12 steps ahead forecasts: with any observation, you now have access to forecasts up to 12 steps ahead with, for
+  example, `obs.simulate(..., time_step=12)` or `obs.get_forecast_env()` which has a maximum duration of 12 steps
+  (previously only 1 step ahead forecasts were available). This could be used in a `model based` strategy for example
+  (see the sketch after this list and the page :ref:`model_based_rl` for more examples)
+- a more complex opponent: the opponent can attack 3 lines in 3 different areas of the grid at the same time (instead of
+ being limited to just 1 attack)
+- more complex rules: in this environment, to balance the fact that the opponent can make 3 attacks, the agent can
+  also act on 3 different powerlines and 3 different substations per step (one per area).
+
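+A minimal sketch of how these forecasts can be used (assuming the standard observation API):
+
+.. code-block:: python
+
+    import grid2op
+
+    env = grid2op.make("l2rpn_idf_2023")
+    obs = env.reset()
+
+    # assess the impact of an action up to 12 steps ahead
+    act = env.action_space()  # "do nothing" action
+    sim_obs, sim_reward, sim_done, sim_info = obs.simulate(act, time_step=12)
+
+    # or step through a "forecast environment" (at most 12 steps long)
+    forecast_env = obs.get_forecast_env()
+    f_obs = forecast_env.reset()
+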
+The grid is split into 3 distinct areas:
+
+|l2rpn_idf_2023_areas|
+
+Like most grid2op environments, it also features maintenance. The lines that can be under maintenance are:
+
+.. line_color_maint = np.zeros(env.n_line)
+.. line_in_maintenance = {'21_22_93', '39_41_121', '54_58_154', '17_18_88', '29_37_117',
+.. '91_92_37', '41_48_131', '80_79_175', '88_91_33', '48_50_136',
+.. '43_44_125', '12_14_68', '62_58_180', '44_45_126', '74_117_81',
+.. '26_31_106', '4_10_162', '93_95_43', '62_63_160', '48_53_141',
+.. '34_35_110'}
+.. line_in_maintenance = list(line_in_maintenance)
+
+.. line_color_maint[np.isin(env.name_line, line_in_maintenance) & np.isin(np.arange(env.n_line), lines_by_area[0])] = 1.0
+.. line_color_maint[np.isin(env.name_line, line_in_maintenance) & np.isin(np.arange(env.n_line), lines_by_area[1])] = 2.0
+.. line_color_maint[np.isin(env.name_line, line_in_maintenance) & np.isin(np.arange(env.n_line), lines_by_area[2])] = 3.0
+.. plot_helper._line_color_scheme = ["gray", "blue", "orange", "red"]
+.. _ = plot_helper.plot_info(line_values=line_color_maint, coloring="line")
+.. plot_helper.restore_line_palette()
+
+|l2rpn_idf_2023_maint|
+
+And the lines that can be attacked by the opponent are:
+
+.. attacked_lines = [106, 93, 88, 162, 68, 117, 180, 160, 136, 141, 131, 121, 125, 126, 110, 154, 81, 43, 33, 37, 62, 61]
+.. line_color_att = np.zeros(env.n_line)
+.. line_color_att[np.isin(np.arange(env.n_line), attacked_lines) & np.isin(np.arange(env.n_line), lines_by_area[0])] = 1.0
+.. line_color_att[np.isin(np.arange(env.n_line), attacked_lines) & np.isin(np.arange(env.n_line), lines_by_area[1])] = 2.0
+.. line_color_att[np.isin(np.arange(env.n_line), attacked_lines) & np.isin(np.arange(env.n_line), lines_by_area[2])] = 3.0
+.. plot_helper._line_color_scheme = ["gray", "blue", "orange", "red"]
+.. _ = plot_helper.plot_info(line_values=line_color_att, coloring="line")
+.. plot_helper.restore_line_palette()
+
+|l2rpn_idf_2023_att|
+
+
.. _l2rpn_wcci_2022:
l2rpn_wcci_2022
@@ -159,7 +241,7 @@ much data as you want with the :func:`grid2op.Environment.Environment.generate_d
env_name = "l2rpn_wcci_2022"
env = grid2op.make(env_name)
-It counts 118 substations, 186 powerlines, 91 loads and 62 loads. It will be used for the L2RPN competitions at WCCI in 2022.
+It counts 118 substations, 186 powerlines, 91 loads and 62 generators. It will be used for the L2RPN competitions at WCCI in 2022.
|l2rpn_wcci_2022_layout|
diff --git a/docs/conf.py b/docs/conf.py
index 9b2060d90..bf71370d0 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -22,7 +22,7 @@
author = 'Benjamin Donnot'
# The full version, including alpha/beta/rc tags
-release = '1.9.1'
+release = '1.9.2.dev0'
version = '1.9'
diff --git a/docs/img/l2rpn_idf_2023_areas.png b/docs/img/l2rpn_idf_2023_areas.png
new file mode 100644
index 000000000..9b576c254
Binary files /dev/null and b/docs/img/l2rpn_idf_2023_areas.png differ
diff --git a/docs/img/l2rpn_idf_2023_att.png b/docs/img/l2rpn_idf_2023_att.png
new file mode 100644
index 000000000..51ab07627
Binary files /dev/null and b/docs/img/l2rpn_idf_2023_att.png differ
diff --git a/docs/img/l2rpn_idf_2023_layout.png b/docs/img/l2rpn_idf_2023_layout.png
new file mode 100644
index 000000000..45c8c18cf
Binary files /dev/null and b/docs/img/l2rpn_idf_2023_layout.png differ
diff --git a/docs/img/l2rpn_idf_2023_maint.png b/docs/img/l2rpn_idf_2023_maint.png
new file mode 100644
index 000000000..2c7962c9d
Binary files /dev/null and b/docs/img/l2rpn_idf_2023_maint.png differ
diff --git a/docs/observation.rst b/docs/observation.rst
index d4b308999..86bc3baba 100644
--- a/docs/observation.rst
+++ b/docs/observation.rst
@@ -86,7 +86,7 @@ how well the past action
performed. The second main input received from the environment is the :class:`BaseObservation`. This is gives the BaseAgent
partial, noisy, or complete information about the current state of the environment. This module implement a generic
:class:`BaseObservation` class and an example of a complete observation in the case of the Learning
-To Run a Power Network (`l2RPN `_ ) competition.
+To Run a Power Network (`L2RPN `_ ) competition.
Compared to other Reinforcement Learning problems the L2PRN competition allows another flexibility. Today, when
operating a powergrid, operators have "forecasts" at their disposal. We wanted to make them available in the
diff --git a/grid2op/Action/_BackendAction.py b/grid2op/Action/_BackendAction.py
index 7f399f983..97de586fb 100644
--- a/grid2op/Action/_BackendAction.py
+++ b/grid2op/Action/_BackendAction.py
@@ -6,584 +6,8 @@
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
-import copy
-import numpy as np
+"""
+This file is here to maintain compatibility with lightsim2grid (but will disappear "soon")
+"""
-from grid2op.dtypes import dt_int, dt_bool, dt_float
-from grid2op.Space import GridObjects
-
-
-# TODO see if it can be done in c++ easily
-class ValueStore:
- """
- INTERNAL USE ONLY
-
- .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
-
- """
-
- def __init__(self, size, dtype):
- ## TODO at the init it's mandatory to have everything at "1" here
- # if topo is not "fully connected" it will not work
- self.values = np.ones(size, dtype=dtype)
- self.changed = np.full(size, dtype=dt_bool, fill_value=False)
- self.last_index = 0
- self.__size = size
-
- if issubclass(dtype, dt_int):
- self.set_val = self._set_val_int
- self.change_val = self._change_val_int
- elif issubclass(dtype, dt_float):
- self.set_val = self._set_val_float
- self.change_val = self._change_val_float
-
- def _set_val_float(self, newvals):
- changed_ = np.isfinite(newvals)
- self.changed[changed_] = True
- self.values[changed_] = newvals[changed_]
-
- def _set_val_int(self, newvals):
- changed_ = newvals != 0
- self.changed[changed_] = True
- self.values[changed_] = newvals[changed_]
-
- def _change_val_int(self, newvals):
- changed_ = newvals & (self.values > 0)
- self.changed[changed_] = True
- self.values[changed_] = (1 - self.values[changed_]) + 2
-
- def _change_val_float(self, newvals):
- changed_ = newvals != 0.0
- self.changed[changed_] = True
- self.values[changed_] += newvals[changed_]
-
- def reset(self):
- self.changed[:] = False
- self.last_index = 0
-
- def change_status(self, switch, lineor_id, lineex_id, old_vect):
- if not np.any(switch):
- # nothing is modified so i stop here
- return
-
- # changed
- changed_ = switch
-
- # make it to ids
- id_chg_or = lineor_id[changed_]
- id_chg_ex = lineex_id[changed_]
-
- self.changed[id_chg_or] = True
- self.changed[id_chg_ex] = True
-
- # disconnect the powerlines
- me_or_bus = self.values[id_chg_or]
- me_ex_bus = self.values[id_chg_ex]
- was_connected = (me_or_bus > 0) | (me_ex_bus > 0)
- was_disco = ~was_connected
-
- # it was connected, i disconnect it
- self.values[id_chg_or[was_connected]] = -1
- self.values[id_chg_ex[was_connected]] = -1
-
- # it was disconnected, i reconnect it
- reco_or = id_chg_or[was_disco]
- reco_ex = id_chg_ex[was_disco]
- self.values[reco_or] = old_vect[reco_or]
- self.values[reco_ex] = old_vect[reco_ex]
-
- def set_status(self, set_status, lineor_id, lineex_id, old_vect):
- id_or = lineor_id
- id_ex = lineex_id
-
- # disco
- disco_ = set_status == -1
- reco_ = set_status == 1
-
- # make it to ids
- id_reco_or = id_or[reco_]
- id_reco_ex = id_ex[reco_]
- id_disco_or = id_or[disco_]
- id_disco_ex = id_ex[disco_]
-
- self.changed[id_reco_or] = True
- self.changed[id_reco_ex] = True
- self.changed[id_disco_or] = True
- self.changed[id_disco_ex] = True
-
- # disconnect the powerlines
- self.values[id_disco_or] = -1
- self.values[id_disco_ex] = -1
-
- # reconnect the powerlines
- # don't consider powerlines that have been already changed with topology
- # ie reconnect to the old bus only powerline from which we don't know the status
- id_reco_or = id_reco_or[self.values[id_reco_or] < 0]
- id_reco_ex = id_reco_ex[self.values[id_reco_ex] < 0]
-
- self.values[id_reco_or] = old_vect[id_reco_or]
- self.values[id_reco_ex] = old_vect[id_reco_ex]
-
- def get_line_status(self, lineor_id, lineex_id):
- return self.values[lineor_id], self.values[lineex_id]
-
- def update_connected(self, current_values):
- indx_conn = current_values.values > 0
- self.values[indx_conn] = current_values.values[indx_conn]
-
- def all_changed(self):
- self.reset()
- self.changed[:] = True
-
- def __getitem__(self, item):
- return self.values[item]
-
- def __setitem__(self, key, value):
- self.values[key] = value
- self.changed[key] = value
-
- def __iter__(self):
- return self
-
- def __next__(self):
- res = None
- while self.last_index < self.values.shape[0]:
- if self.changed[self.last_index]:
- res = (self.last_index, self.values[self.last_index])
- self.last_index += 1
- if res is not None:
- break
- if res is not None:
- return res
- else:
- raise StopIteration
-
- def __len__(self):
- return self.__size
-
- def reorder(self, new_order):
- """reorder the element modified, this is use when converting backends only and should not be use
- outside of this usecase"""
- self.changed[:] = self.changed[new_order]
- self.values[:] = self.values[new_order]
-
- def copy_from_index(self, ref, index):
- self.reset()
- self.changed[:] = ref.changed[index]
- self.values[:] = ref.values[index]
-
- def __copy__(self):
- res = type(self)(self.values.shape[0], self.values.dtype.type)
- res.values[:] = self.values
- res.changed[:] = self.changed
- res.last_index = self.last_index
- res.__size = self.__size
- return res
-
- def __deepcopy__(self, memodict={}):
- res = type(self)(self.values.shape[0], self.values.dtype.type)
- res.values[:] = self.values
- res.changed[:] = self.changed
- res.last_index = self.last_index
- res.__size = self.__size
- return res
-
-
-class _BackendAction(GridObjects):
- """
- .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
-
- Internal class, use at your own risk.
-
- This class "digest" the players / environment / opponent / voltage controlers "action",
- and transform it to setpoint for the backend.
- """
-
- def __init__(self):
- GridObjects.__init__(self)
- # last connected registered
- self.last_topo_registered = ValueStore(self.dim_topo, dtype=dt_int)
-
- # topo at time t
- self.current_topo = ValueStore(self.dim_topo, dtype=dt_int)
-
- # injection at time t
- self.prod_p = ValueStore(self.n_gen, dtype=dt_float)
- self.prod_v = ValueStore(self.n_gen, dtype=dt_float)
- self.load_p = ValueStore(self.n_load, dtype=dt_float)
- self.load_q = ValueStore(self.n_load, dtype=dt_float)
- self.storage_power = ValueStore(self.n_storage, dtype=dt_float)
-
- self.activated_bus = np.full((self.n_sub, 2), dtype=dt_bool, fill_value=False)
- self.big_topo_to_subid = np.repeat(
- list(range(self.n_sub)), repeats=self.sub_info
- )
-
- # shunts
- if self.shunts_data_available:
- self.shunt_p = ValueStore(self.n_shunt, dtype=dt_float)
- self.shunt_q = ValueStore(self.n_shunt, dtype=dt_float)
- self.shunt_bus = ValueStore(self.n_shunt, dtype=dt_int)
-
- self._status_or_before = np.ones(self.n_line, dtype=dt_int)
- self._status_ex_before = np.ones(self.n_line, dtype=dt_int)
- self._status_or = np.ones(self.n_line, dtype=dt_int)
- self._status_ex = np.ones(self.n_line, dtype=dt_int)
-
- self._loads_bus = None
- self._gens_bus = None
- self._lines_or_bus = None
- self._lines_ex_bus = None
- self._storage_bus = None
-
- def __deepcopy__(self, memodict={}):
- res = type(self)()
- # last connected registered
- res.last_topo_registered = copy.deepcopy(self.last_topo_registered)
- res.current_topo = copy.deepcopy(self.current_topo)
- res.prod_p = copy.deepcopy(self.prod_p)
- res.prod_v = copy.deepcopy(self.prod_v)
- res.load_p = copy.deepcopy(self.load_p)
- res.load_q = copy.deepcopy(self.load_q)
- res.storage_power = copy.deepcopy(self.storage_power)
- res.activated_bus[:] = self.activated_bus
- res.big_topo_to_subid[:] = self.big_topo_to_subid
- if self.shunts_data_available:
- res.shunt_p = copy.deepcopy(self.shunt_p)
- res.shunt_q = copy.deepcopy(self.shunt_q)
- res.shunt_bus = copy.deepcopy(self.shunt_bus)
-
- res._status_or_before[:] = self._status_or_before
- res._status_ex_before[:] = self._status_ex_before
- res._status_or[:] = self._status_or
- res._status_ex[:] = self._status_ex
-
- res._loads_bus = copy.deepcopy(self._loads_bus)
- res._gens_bus = copy.deepcopy(self._gens_bus)
- res._lines_or_bus = copy.deepcopy(self._lines_or_bus)
- res._lines_ex_bus = copy.deepcopy(self._lines_ex_bus)
- res._storage_bus = copy.deepcopy(self._storage_bus)
- return res
-
- def __copy__(self):
- res = type(self)()
- # last connected registered
- res.last_topo_registered = copy.copy(self.last_topo_registered)
- res.current_topo = copy.copy(self.current_topo)
- res.prod_p = copy.copy(self.prod_p)
- res.prod_v = copy.copy(self.prod_v)
- res.load_p = copy.copy(self.load_p)
- res.load_q = copy.copy(self.load_q)
- res.storage_power = copy.copy(self.storage_power)
- res.activated_bus[:] = self.activated_bus
- res.big_topo_to_subid[:] = self.big_topo_to_subid
- if self.shunts_data_available:
- res.shunt_p = copy.copy(self.shunt_p)
- res.shunt_q = copy.copy(self.shunt_q)
- res.shunt_bus = copy.copy(self.shunt_bus)
-
- res._status_or_before[:] = self._status_or_before
- res._status_ex_before[:] = self._status_ex_before
- res._status_or[:] = self._status_or
- res._status_ex[:] = self._status_ex
-
- res._loads_bus = copy.copy(self._loads_bus)
- res._gens_bus = copy.copy(self._gens_bus)
- res._lines_or_bus = copy.copy(self._lines_or_bus)
- res._lines_ex_bus = copy.copy(self._lines_ex_bus)
- res._storage_bus = copy.copy(self._storage_bus)
- return res
-
- def reorder(self, no_load, no_gen, no_topo, no_storage, no_shunt):
- """
- .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
-
- reorder the element modified, this is use when converting backends only and should not be use
- outside of this usecase
-
- no_* stands for "new order"
- """
- self.last_topo_registered.reorder(no_topo)
- self.current_topo.reorder(no_topo)
- self.prod_p.reorder(no_gen)
- self.prod_v.reorder(no_gen)
- self.load_p.reorder(no_load)
- self.load_q.reorder(no_load)
-
- self.storage_power.reorder(no_storage)
-
- if self.shunts_data_available:
- self.shunt_p.reorder(no_shunt)
- self.shunt_q.reorder(no_shunt)
- self.shunt_bus.reorder(no_shunt)
-
- def reset(self):
- # last topo
- self.last_topo_registered.reset()
-
- # topo at time t
- self.current_topo.reset()
-
- # injection at time t
- self.prod_p.reset()
- self.prod_v.reset()
- self.load_p.reset()
- self.load_q.reset()
- self.storage_power.reset()
- # storage unit have their power reset to 0. each step
- self.storage_power.changed[:] = True
- self.storage_power.values[:] = 0.0
-
- # shunts
- if self.shunts_data_available:
- self.shunt_p.reset()
- self.shunt_q.reset()
- self.shunt_bus.reset()
-
- def all_changed(self):
- # last topo
- self.last_topo_registered.all_changed()
-
- # topo at time t
- self.current_topo.all_changed()
-
- # injection at time t
- self.prod_p.all_changed()
- self.prod_v.all_changed()
- self.load_p.all_changed()
- self.load_q.all_changed()
- self.storage_power.all_changed()
-
- # TODO handle shunts
- # shunts
- # if self.shunts_data_available:
- # self.shunt_p.all_changed()
- # self.shunt_q.all_changed()
- # self.shunt_bus.all_changed()
-
- def set_redispatch(self, new_redispatching):
- self.prod_p.change_val(new_redispatching)
-
- def __iadd__(self, other):
- """
- .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
-
- other: a grid2op action standard
-
- Parameters
- ----------
- other: :class:`grid2op.Action.BaseAction.BaseAction`
-
- Returns
- -------
-
- """
-
- dict_injection = other._dict_inj
- set_status = other._set_line_status
- switch_status = other._switch_line_status
- set_topo_vect = other._set_topo_vect
- switcth_topo_vect = other._change_bus_vect
- redispatching = other._redispatch
- storage_power = other._storage_power
-
- # I deal with injections
- # Ia set the injection
- if other._modif_inj:
- if "load_p" in dict_injection:
- tmp = dict_injection["load_p"]
- self.load_p.set_val(tmp)
- if "load_q" in dict_injection:
- tmp = dict_injection["load_q"]
- self.load_q.set_val(tmp)
- if "prod_p" in dict_injection:
- tmp = dict_injection["prod_p"]
- self.prod_p.set_val(tmp)
- if "prod_v" in dict_injection:
- tmp = dict_injection["prod_v"]
- self.prod_v.set_val(tmp)
-
- # Ib change the injection aka redispatching
- if other._modif_redispatch:
- self.prod_p.change_val(redispatching)
-
- # Ic storage unit
- if other._modif_storage:
- self.storage_power.set_val(storage_power)
-
- # II shunts
- if self.shunts_data_available:
- shunts = {}
- if other.shunts_data_available:
- shunts["shunt_p"] = other.shunt_p
- shunts["shunt_q"] = other.shunt_q
- shunts["shunt_bus"] = other.shunt_bus
-
- arr_ = shunts["shunt_p"]
- self.shunt_p.set_val(arr_)
- arr_ = shunts["shunt_q"]
- self.shunt_q.set_val(arr_)
- arr_ = shunts["shunt_bus"]
- self.shunt_bus.set_val(arr_)
-
- # III line status
- # this need to be done BEFORE the topology, as a connected powerline will be connected to their old bus.
- # regardless if the status is changed in the action or not.
- if other._modif_change_status:
- self.current_topo.change_status(
- switch_status,
- self.line_or_pos_topo_vect,
- self.line_ex_pos_topo_vect,
- self.last_topo_registered,
- )
- if other._modif_set_status:
- self.current_topo.set_status(
- set_status,
- self.line_or_pos_topo_vect,
- self.line_ex_pos_topo_vect,
- self.last_topo_registered,
- )
-
- # if other._modif_change_status or other._modif_set_status:
- (
- self._status_or_before[:],
- self._status_ex_before[:],
- ) = self.current_topo.get_line_status(
- self.line_or_pos_topo_vect, self.line_ex_pos_topo_vect
- )
-
- # IV topo
- if other._modif_change_bus:
- self.current_topo.change_val(switcth_topo_vect)
- if other._modif_set_bus:
- self.current_topo.set_val(set_topo_vect)
-
- # V Force disconnected status
- # of disconnected powerlines extremities
- self._status_or[:], self._status_ex[:] = self.current_topo.get_line_status(
- self.line_or_pos_topo_vect, self.line_ex_pos_topo_vect
- )
-
- # At least one disconnected extremity
- if other._modif_change_bus or other._modif_set_bus:
- disco_or = (self._status_or_before == -1) | (self._status_or == -1)
- disco_ex = (self._status_ex_before == -1) | (self._status_ex == -1)
- disco_now = (
- disco_or | disco_ex
- ) # a powerline is disconnected if at least one of its extremity is
- # added
- reco_or = (self._status_or_before == -1) & (self._status_or >= 1)
- reco_ex = (self._status_or_before == -1) & (self._status_ex >= 1)
- reco_now = reco_or | reco_ex
- # Set nothing
- set_now = np.zeros_like(self._status_or)
- # Force some disconnections
- set_now[disco_now] = -1
- set_now[reco_now] = 1
-
- self.current_topo.set_status(
- set_now,
- self.line_or_pos_topo_vect,
- self.line_ex_pos_topo_vect,
- self.last_topo_registered,
- )
-
- return self
-
- def __call__(self):
- injections = (
- self.prod_p,
- self.prod_v,
- self.load_p,
- self.load_q,
- self.storage_power,
- )
- topo = self.current_topo
- shunts = None
- if self.shunts_data_available:
- shunts = self.shunt_p, self.shunt_q, self.shunt_bus
- self._get_active_bus()
- return self.activated_bus, injections, topo, shunts
-
- def get_loads_bus(self):
- if self._loads_bus is None:
- self._loads_bus = ValueStore(self.n_load, dtype=dt_int)
- self._loads_bus.copy_from_index(self.current_topo, self.load_pos_topo_vect)
- return self._loads_bus
-
- def _aux_to_global(self, value_store, to_subid):
- value_store = copy.deepcopy(value_store)
- value_store.values = type(self).local_bus_to_global(value_store.values, to_subid)
- return value_store
-
- def get_loads_bus_global(self):
- tmp_ = self.get_loads_bus()
- return self._aux_to_global(tmp_, self.load_to_subid)
-
- def get_gens_bus(self):
- if self._gens_bus is None:
- self._gens_bus = ValueStore(self.n_gen, dtype=dt_int)
- self._gens_bus.copy_from_index(self.current_topo, self.gen_pos_topo_vect)
- return self._gens_bus
-
- def get_gens_bus_global(self):
- tmp_ = copy.deepcopy(self.get_gens_bus())
- return self._aux_to_global(tmp_, self.gen_to_subid)
-
- def get_lines_or_bus(self):
- if self._lines_or_bus is None:
- self._lines_or_bus = ValueStore(self.n_line, dtype=dt_int)
- self._lines_or_bus.copy_from_index(
- self.current_topo, self.line_or_pos_topo_vect
- )
- return self._lines_or_bus
-
- def get_lines_or_bus_global(self):
- tmp_ = self.get_lines_or_bus()
- return self._aux_to_global(tmp_, self.line_or_to_subid)
-
- def get_lines_ex_bus(self):
- if self._lines_ex_bus is None:
- self._lines_ex_bus = ValueStore(self.n_line, dtype=dt_int)
- self._lines_ex_bus.copy_from_index(
- self.current_topo, self.line_ex_pos_topo_vect
- )
- return self._lines_ex_bus
-
- def get_lines_ex_bus_global(self):
- tmp_ = self.get_lines_ex_bus()
- return self._aux_to_global(tmp_, self.line_ex_to_subid)
-
- def get_storages_bus(self):
- if self._storage_bus is None:
- self._storage_bus = ValueStore(self.n_storage, dtype=dt_int)
- self._storage_bus.copy_from_index(self.current_topo, self.storage_pos_topo_vect)
- return self._storage_bus
-
- def get_storages_bus_global(self):
- tmp_ = self.get_storages_bus()
- return self._aux_to_global(tmp_, self.storage_to_subid)
-
- def _get_active_bus(self):
- self.activated_bus[:] = False
- tmp = self.current_topo.values - 1
- self.activated_bus[self.big_topo_to_subid, tmp] = True
-
- def update_state(self, powerline_disconnected):
- """
- .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
-
- Update the internal state. Should be called after the cascading failures
-
- """
- if np.any(powerline_disconnected >= 0):
- arr_ = np.zeros(powerline_disconnected.shape, dtype=dt_int)
- arr_[powerline_disconnected >= 0] = -1
- self.current_topo.set_status(
- arr_,
- self.line_or_pos_topo_vect,
- self.line_ex_pos_topo_vect,
- self.last_topo_registered,
- )
- self.last_topo_registered.update_connected(self.current_topo)
- self.current_topo.reset()
+from grid2op.Action._backendAction import _BackendAction
diff --git a/grid2op/Action/__init__.py b/grid2op/Action/__init__.py
index 830011a18..d082e05e8 100644
--- a/grid2op/Action/__init__.py
+++ b/grid2op/Action/__init__.py
@@ -23,29 +23,29 @@
]
# Internals
-from grid2op.Action.BaseAction import BaseAction
-from grid2op.Action.PlayableAction import PlayableAction
-from grid2op.Action.VoltageOnlyAction import VoltageOnlyAction
-from grid2op.Action.CompleteAction import CompleteAction
-from grid2op.Action.ActionSpace import ActionSpace
-from grid2op.Action.SerializableActionSpace import SerializableActionSpace
+from grid2op.Action.baseAction import BaseAction
+from grid2op.Action.playableAction import PlayableAction
+from grid2op.Action.voltageOnlyAction import VoltageOnlyAction
+from grid2op.Action.completeAction import CompleteAction
+from grid2op.Action.actionSpace import ActionSpace
+from grid2op.Action.serializableActionSpace import SerializableActionSpace
-from grid2op.Action.DontAct import DontAct
-from grid2op.Action.PowerlineSetAction import PowerlineSetAction
-from grid2op.Action.PowerlineChangeAction import PowerlineChangeAction
-from grid2op.Action.PowerlineSetAndDispatchAction import PowerlineSetAndDispatchAction
-from grid2op.Action.PowerlineChangeAndDispatchAction import (
+from grid2op.Action.dontAct import DontAct
+from grid2op.Action.powerlineSetAction import PowerlineSetAction
+from grid2op.Action.powerlineChangeAction import PowerlineChangeAction
+from grid2op.Action.powerlineSetAndDispatchAction import PowerlineSetAndDispatchAction
+from grid2op.Action.powerlineChangeAndDispatchAction import (
PowerlineChangeAndDispatchAction,
)
-from grid2op.Action.PowerlineChangeDispatchAndStorageAction import (
+from grid2op.Action.powerlineChangeDispatchAndStorageAction import (
PowerlineChangeDispatchAndStorageAction,
)
-from grid2op.Action.TopologyAction import TopologyAction
-from grid2op.Action.TopologyAndDispatchAction import TopologyAndDispatchAction
-from grid2op.Action.TopologySetAction import TopologySetAction
-from grid2op.Action.TopologySetAndDispatchAction import TopologySetAndDispatchAction
-from grid2op.Action.TopologyChangeAction import TopologyChangeAction
-from grid2op.Action.TopologyChangeAndDispatchAction import (
+from grid2op.Action.topologyAction import TopologyAction
+from grid2op.Action.topologyAndDispatchAction import TopologyAndDispatchAction
+from grid2op.Action.topologySetAction import TopologySetAction
+from grid2op.Action.topologySetAndDispatchAction import TopologySetAndDispatchAction
+from grid2op.Action.topologyChangeAction import TopologyChangeAction
+from grid2op.Action.topologyChangeAndDispatchAction import (
TopologyChangeAndDispatchAction,
)
-from grid2op.Action.DispatchAction import DispatchAction
+from grid2op.Action.dispatchAction import DispatchAction
diff --git a/grid2op/Action/_backendAction.py b/grid2op/Action/_backendAction.py
new file mode 100644
index 000000000..b3f72b9a8
--- /dev/null
+++ b/grid2op/Action/_backendAction.py
@@ -0,0 +1,599 @@
+# Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
+# See AUTHORS.txt
+# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
+# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
+# you can obtain one at http://mozilla.org/MPL/2.0/.
+# SPDX-License-Identifier: MPL-2.0
+# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
+
+import copy
+import numpy as np
+
+from grid2op.dtypes import dt_int, dt_bool, dt_float
+from grid2op.Space import GridObjects
+
+
+# TODO see if it can be done in c++ easily
+class ValueStore:
+ """
+ INTERNAL USE ONLY
+
+ .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
+
+ """
+
+ def __init__(self, size, dtype):
+ ## TODO at the init it's mandatory to have everything at "1" here
+ # if topo is not "fully connected" it will not work
+ self.values = np.empty(size, dtype=dtype)
+ self.changed = np.full(size, dtype=dt_bool, fill_value=False)
+ self.last_index = 0
+ self.__size = size
+
+ if issubclass(dtype, dt_int):
+ self.set_val = self._set_val_int
+ self.change_val = self._change_val_int
+ elif issubclass(dtype, dt_float):
+ self.set_val = self._set_val_float
+ self.change_val = self._change_val_float
+
+ def _set_val_float(self, newvals):
+ changed_ = np.isfinite(newvals)
+ self.changed[changed_] = True
+ self.values[changed_] = newvals[changed_]
+
+ def _set_val_int(self, newvals):
+ changed_ = newvals != 0
+ self.changed[changed_] = True
+ self.values[changed_] = newvals[changed_]
+
+ def _change_val_int(self, newvals):
+ changed_ = newvals & (self.values > 0)
+ self.changed[changed_] = True
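+        # toggle the busbar: (1 - v) + 2 maps bus 1 -> 2 and bus 2 -> 1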
+ self.values[changed_] = (1 - self.values[changed_]) + 2
+
+ def _change_val_float(self, newvals):
+ changed_ = newvals != 0.0
+ self.changed[changed_] = True
+ self.values[changed_] += newvals[changed_]
+
+ def reset(self):
+ self.changed[:] = False
+ self.last_index = 0
+
+ def change_status(self, switch, lineor_id, lineex_id, old_vect):
+ if not switch.any():
+ # nothing is modified so i stop here
+ return
+
+ # changed
+ changed_ = switch
+
+ # make it to ids
+ id_chg_or = lineor_id[changed_]
+ id_chg_ex = lineex_id[changed_]
+
+ self.changed[id_chg_or] = True
+ self.changed[id_chg_ex] = True
+
+ # disconnect the powerlines
+ me_or_bus = self.values[id_chg_or]
+ me_ex_bus = self.values[id_chg_ex]
+ was_connected = (me_or_bus > 0) | (me_ex_bus > 0)
+ was_disco = ~was_connected
+
+ # it was connected, i disconnect it
+ self.values[id_chg_or[was_connected]] = -1
+ self.values[id_chg_ex[was_connected]] = -1
+
+ # it was disconnected, i reconnect it
+ reco_or = id_chg_or[was_disco]
+ reco_ex = id_chg_ex[was_disco]
+ self.values[reco_or] = old_vect[reco_or]
+ self.values[reco_ex] = old_vect[reco_ex]
+
+ def set_status(self, set_status, lineor_id, lineex_id, old_vect):
+ id_or = lineor_id
+ id_ex = lineex_id
+
+ # disco
+ disco_ = set_status == -1
+ reco_ = set_status == 1
+
+ # make it to ids
+ id_reco_or = id_or[reco_]
+ id_reco_ex = id_ex[reco_]
+ id_disco_or = id_or[disco_]
+ id_disco_ex = id_ex[disco_]
+
+ self.changed[id_reco_or] = True
+ self.changed[id_reco_ex] = True
+ self.changed[id_disco_or] = True
+ self.changed[id_disco_ex] = True
+
+ # disconnect the powerlines
+ self.values[id_disco_or] = -1
+ self.values[id_disco_ex] = -1
+
+ # reconnect the powerlines
+ # don't consider powerlines that have been already changed with topology
+ # ie reconnect to the old bus only powerline from which we don't know the status
+ id_reco_or = id_reco_or[self.values[id_reco_or] < 0]
+ id_reco_ex = id_reco_ex[self.values[id_reco_ex] < 0]
+
+ self.values[id_reco_or] = old_vect[id_reco_or]
+ self.values[id_reco_ex] = old_vect[id_reco_ex]
+
+ def get_line_status(self, lineor_id, lineex_id):
+ return self.values[lineor_id], self.values[lineex_id]
+
+ def update_connected(self, current_values):
+ indx_conn = current_values.values > 0
+ self.values[indx_conn] = current_values.values[indx_conn]
+
+ def all_changed(self):
+ self.reset()
+ self.changed[:] = True
+
+ def __getitem__(self, item):
+ return self.values[item]
+
+ def __setitem__(self, key, value):
+ self.values[key] = value
+ self.changed[key] = value
+
+ def __iter__(self):
+ return self
+
+ def __next__(self):
+ res = None
+ while self.last_index < self.values.shape[0]:
+ if self.changed[self.last_index]:
+ res = (self.last_index, self.values[self.last_index])
+ self.last_index += 1
+ if res is not None:
+ break
+ if res is not None:
+ return res
+ else:
+ raise StopIteration
+
+ def __len__(self):
+ return self.__size
+
+ def reorder(self, new_order):
+ """reorder the element modified, this is use when converting backends only and should not be use
+ outside of this usecase"""
+ self.changed[:] = self.changed[new_order]
+ self.values[:] = self.values[new_order]
+
+ def copy_from_index(self, ref, index):
+ self.reset()
+ self.changed[:] = ref.changed[index]
+ self.values[:] = ref.values[index]
+
+ def __copy__(self):
+ res = type(self)(self.values.shape[0], self.values.dtype.type)
+ res.values[:] = self.values
+ res.changed[:] = self.changed
+ res.last_index = self.last_index
+ res.__size = self.__size
+ return res
+
+ def __deepcopy__(self, memodict={}):
+ res = type(self)(self.values.shape[0], self.values.dtype.type)
+ res.values[:] = self.values
+ res.changed[:] = self.changed
+ res.last_index = self.last_index
+ res.__size = self.__size
+ return res
+
+ def copy(self, other):
+        """copy the content of another ValueStore into this one (shallow and deep copies are equivalent here) without re-allocating the arrays"""
+ self.values[:] = other.values
+ self.changed[:] = other.changed
+ self.last_index = other.last_index
+ self.__size = other.__size
+
+ def force_unchanged(self, mask, local_bus):
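+        # elements that end up disconnected (local bus == -1) among the modified ones (mask)
+        # are flagged as "not changed": their injection setpoints will not be applied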
+ to_unchanged = local_bus == -1
+ to_unchanged[~mask] = False
+ self.changed[to_unchanged] = False
+
+
+class _BackendAction(GridObjects):
+ """
+ .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
+
+ Internal class, use at your own risk.
+
+ This class "digest" the players / environment / opponent / voltage controlers "action",
+ and transform it to setpoint for the backend.
+ """
+
+ def __init__(self):
+ GridObjects.__init__(self)
+ # last connected registered
+ self.last_topo_registered = ValueStore(self.dim_topo, dtype=dt_int)
+
+ # topo at time t
+ self.current_topo = ValueStore(self.dim_topo, dtype=dt_int)
+
+ # by default everything is on busbar 1
+ self.last_topo_registered.values[:] = 1
+ self.current_topo.values[:] = 1
+
+ # injection at time t
+ self.prod_p = ValueStore(self.n_gen, dtype=dt_float)
+ self.prod_v = ValueStore(self.n_gen, dtype=dt_float)
+ self.load_p = ValueStore(self.n_load, dtype=dt_float)
+ self.load_q = ValueStore(self.n_load, dtype=dt_float)
+ self.storage_power = ValueStore(self.n_storage, dtype=dt_float)
+
+ self.activated_bus = np.full((self.n_sub, 2), dtype=dt_bool, fill_value=False)
+ self.big_topo_to_subid = np.repeat(
+ list(range(self.n_sub)), repeats=self.sub_info
+ )
+
+ # shunts
+ if self.shunts_data_available:
+ self.shunt_p = ValueStore(self.n_shunt, dtype=dt_float)
+ self.shunt_q = ValueStore(self.n_shunt, dtype=dt_float)
+ self.shunt_bus = ValueStore(self.n_shunt, dtype=dt_int)
+
+ self._status_or_before = np.ones(self.n_line, dtype=dt_int)
+ self._status_ex_before = np.ones(self.n_line, dtype=dt_int)
+ self._status_or = np.ones(self.n_line, dtype=dt_int)
+ self._status_ex = np.ones(self.n_line, dtype=dt_int)
+
+ self._loads_bus = None
+ self._gens_bus = None
+ self._lines_or_bus = None
+ self._lines_ex_bus = None
+ self._storage_bus = None
+
+ def __deepcopy__(self, memodict={}):
+ res = type(self)()
+ # last connected registered
+ res.last_topo_registered.copy(self.last_topo_registered)
+ res.current_topo.copy(self.current_topo)
+ res.prod_p.copy(self.prod_p)
+ res.prod_v.copy(self.prod_v)
+ res.load_p.copy(self.load_p)
+ res.load_q.copy(self.load_q)
+ res.storage_power.copy(self.storage_power)
+ res.activated_bus[:, :] = self.activated_bus
+ # res.big_topo_to_subid[:] = self.big_topo_to_subid # cste
+ if self.shunts_data_available:
+ res.shunt_p.copy(self.shunt_p)
+ res.shunt_q.copy(self.shunt_q)
+ res.shunt_bus.copy(self.shunt_bus)
+
+ res._status_or_before[:] = self._status_or_before
+ res._status_ex_before[:] = self._status_ex_before
+ res._status_or[:] = self._status_or
+ res._status_ex[:] = self._status_ex
+
+ res._loads_bus = copy.deepcopy(self._loads_bus)
+ res._gens_bus = copy.deepcopy(self._gens_bus)
+ res._lines_or_bus = copy.deepcopy(self._lines_or_bus)
+ res._lines_ex_bus = copy.deepcopy(self._lines_ex_bus)
+ res._storage_bus = copy.deepcopy(self._storage_bus)
+
+ return res
+
+ def __copy__(self):
+        res = self.__deepcopy__()  # a shallow copy would be identical to a deep copy here
+ return res
+
+ def reorder(self, no_load, no_gen, no_topo, no_storage, no_shunt):
+ """
+ .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
+
+ reorder the element modified, this is use when converting backends only and should not be use
+ outside of this usecase
+
+ no_* stands for "new order"
+ """
+ self.last_topo_registered.reorder(no_topo)
+ self.current_topo.reorder(no_topo)
+ self.prod_p.reorder(no_gen)
+ self.prod_v.reorder(no_gen)
+ self.load_p.reorder(no_load)
+ self.load_q.reorder(no_load)
+
+ self.storage_power.reorder(no_storage)
+
+ if self.shunts_data_available:
+ self.shunt_p.reorder(no_shunt)
+ self.shunt_q.reorder(no_shunt)
+ self.shunt_bus.reorder(no_shunt)
+
+ def reset(self):
+ # last topo
+ self.last_topo_registered.reset()
+
+ # topo at time t
+ self.current_topo.reset()
+
+ # injection at time t
+ self.prod_p.reset()
+ self.prod_v.reset()
+ self.load_p.reset()
+ self.load_q.reset()
+ self.storage_power.reset()
+ # storage unit have their power reset to 0. each step
+ self.storage_power.changed[:] = True
+ self.storage_power.values[:] = 0.0
+
+ # shunts
+ if self.shunts_data_available:
+ self.shunt_p.reset()
+ self.shunt_q.reset()
+ self.shunt_bus.reset()
+
+ def all_changed(self):
+ # last topo
+ self.last_topo_registered.all_changed()
+
+ # topo at time t
+ self.current_topo.all_changed()
+
+ # injection at time t
+ self.prod_p.all_changed()
+ self.prod_v.all_changed()
+ self.load_p.all_changed()
+ self.load_q.all_changed()
+ self.storage_power.all_changed()
+
+ # TODO handle shunts
+ # shunts
+ # if self.shunts_data_available:
+ # self.shunt_p.all_changed()
+ # self.shunt_q.all_changed()
+ # self.shunt_bus.all_changed()
+
+ def set_redispatch(self, new_redispatching):
+ self.prod_p.change_val(new_redispatching)
+
+ def __iadd__(self, other):
+ """
+ .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
+
+ other: a grid2op action standard
+
+ Parameters
+ ----------
+ other: :class:`grid2op.Action.BaseAction.BaseAction`
+
+ Returns
+ -------
+
+ """
+
+ dict_injection = other._dict_inj
+ set_status = other._set_line_status
+ switch_status = other._switch_line_status
+ set_topo_vect = other._set_topo_vect
+ switcth_topo_vect = other._change_bus_vect
+ redispatching = other._redispatch
+ storage_power = other._storage_power
+
+ # I deal with injections
+ # Ia set the injection
+ if other._modif_inj:
+ if "load_p" in dict_injection:
+ tmp = dict_injection["load_p"]
+ self.load_p.set_val(tmp)
+ if "load_q" in dict_injection:
+ tmp = dict_injection["load_q"]
+ self.load_q.set_val(tmp)
+ if "prod_p" in dict_injection:
+ tmp = dict_injection["prod_p"]
+ self.prod_p.set_val(tmp)
+ if "prod_v" in dict_injection:
+ tmp = dict_injection["prod_v"]
+ self.prod_v.set_val(tmp)
+
+ # Ib change the injection aka redispatching
+ if other._modif_redispatch:
+ self.prod_p.change_val(redispatching)
+
+ # Ic storage unit
+ if other._modif_storage:
+ self.storage_power.set_val(storage_power)
+
+ # II shunts
+ if self.shunts_data_available:
+ shunts = {}
+ if other.shunts_data_available:
+ shunts["shunt_p"] = other.shunt_p
+ shunts["shunt_q"] = other.shunt_q
+ shunts["shunt_bus"] = other.shunt_bus
+
+ arr_ = shunts["shunt_p"]
+ self.shunt_p.set_val(arr_)
+ arr_ = shunts["shunt_q"]
+ self.shunt_q.set_val(arr_)
+ arr_ = shunts["shunt_bus"]
+ self.shunt_bus.set_val(arr_)
+
+ # III line status
+ # this need to be done BEFORE the topology, as a connected powerline will be connected to their old bus.
+ # regardless if the status is changed in the action or not.
+ if other._modif_change_status:
+ self.current_topo.change_status(
+ switch_status,
+ self.line_or_pos_topo_vect,
+ self.line_ex_pos_topo_vect,
+ self.last_topo_registered,
+ )
+ if other._modif_set_status:
+ self.current_topo.set_status(
+ set_status,
+ self.line_or_pos_topo_vect,
+ self.line_ex_pos_topo_vect,
+ self.last_topo_registered,
+ )
+
+ # if other._modif_change_status or other._modif_set_status:
+ (
+ self._status_or_before[:],
+ self._status_ex_before[:],
+ ) = self.current_topo.get_line_status(
+ self.line_or_pos_topo_vect, self.line_ex_pos_topo_vect
+ )
+
+ # IV topo
+ if other._modif_change_bus:
+ self.current_topo.change_val(switcth_topo_vect)
+ if other._modif_set_bus:
+ self.current_topo.set_val(set_topo_vect)
+
+ # V Force disconnected status
+ # of disconnected powerlines extremities
+ self._status_or[:], self._status_ex[:] = self.current_topo.get_line_status(
+ self.line_or_pos_topo_vect, self.line_ex_pos_topo_vect
+ )
+
+ # At least one disconnected extremity
+ if other._modif_change_bus or other._modif_set_bus:
+ disco_or = (self._status_or_before == -1) | (self._status_or == -1)
+ disco_ex = (self._status_ex_before == -1) | (self._status_ex == -1)
+ disco_now = (
+ disco_or | disco_ex
+ ) # a powerline is disconnected if at least one of its extremity is
+ # added
+ reco_or = (self._status_or_before == -1) & (self._status_or >= 1)
+ reco_ex = (self._status_or_before == -1) & (self._status_ex >= 1)
+ reco_now = reco_or | reco_ex
+ # Set nothing
+ set_now = np.zeros_like(self._status_or)
+ # Force some disconnections
+ set_now[disco_now] = -1
+ set_now[reco_now] = 1
+
+ self.current_topo.set_status(
+ set_now,
+ self.line_or_pos_topo_vect,
+ self.line_ex_pos_topo_vect,
+ self.last_topo_registered,
+ )
+
+ return self
+
+ def _assign_0_to_disco_el(self):
+        """do not consider disconnected elements as modified for their active / reactive / voltage values"""
+ gen_changed = self.current_topo.changed[type(self).gen_pos_topo_vect]
+ gen_bus = self.current_topo.values[type(self).gen_pos_topo_vect]
+ self.prod_p.force_unchanged(gen_changed, gen_bus)
+ self.prod_v.force_unchanged(gen_changed, gen_bus)
+
+ load_changed = self.current_topo.changed[type(self).load_pos_topo_vect]
+ load_bus = self.current_topo.values[type(self).load_pos_topo_vect]
+ self.load_p.force_unchanged(load_changed, load_bus)
+ self.load_q.force_unchanged(load_changed, load_bus)
+
+ sto_changed = self.current_topo.changed[type(self).storage_pos_topo_vect]
+ sto_bus = self.current_topo.values[type(self).storage_pos_topo_vect]
+ self.storage_power.force_unchanged(sto_changed, sto_bus)
+
+ def __call__(self):
+ self._assign_0_to_disco_el()
+ injections = (
+ self.prod_p,
+ self.prod_v,
+ self.load_p,
+ self.load_q,
+ self.storage_power,
+ )
+ topo = self.current_topo
+ shunts = None
+ if self.shunts_data_available:
+ shunts = self.shunt_p, self.shunt_q, self.shunt_bus
+ self._get_active_bus()
+ return self.activated_bus, injections, topo, shunts
+
+ def get_loads_bus(self):
+ if self._loads_bus is None:
+ self._loads_bus = ValueStore(self.n_load, dtype=dt_int)
+ self._loads_bus.copy_from_index(self.current_topo, self.load_pos_topo_vect)
+ return self._loads_bus
+
+ def _aux_to_global(self, value_store, to_subid):
+ value_store = copy.deepcopy(value_store)
+ value_store.values = type(self).local_bus_to_global(value_store.values, to_subid)
+ return value_store
+
+ def get_loads_bus_global(self):
+ tmp_ = self.get_loads_bus()
+ return self._aux_to_global(tmp_, self.load_to_subid)
+
+ def get_gens_bus(self):
+ if self._gens_bus is None:
+ self._gens_bus = ValueStore(self.n_gen, dtype=dt_int)
+ self._gens_bus.copy_from_index(self.current_topo, self.gen_pos_topo_vect)
+ return self._gens_bus
+
+ def get_gens_bus_global(self):
+ tmp_ = copy.deepcopy(self.get_gens_bus())
+ return self._aux_to_global(tmp_, self.gen_to_subid)
+
+ def get_lines_or_bus(self):
+ if self._lines_or_bus is None:
+ self._lines_or_bus = ValueStore(self.n_line, dtype=dt_int)
+ self._lines_or_bus.copy_from_index(
+ self.current_topo, self.line_or_pos_topo_vect
+ )
+ return self._lines_or_bus
+
+ def get_lines_or_bus_global(self):
+ tmp_ = self.get_lines_or_bus()
+ return self._aux_to_global(tmp_, self.line_or_to_subid)
+
+ def get_lines_ex_bus(self):
+ if self._lines_ex_bus is None:
+ self._lines_ex_bus = ValueStore(self.n_line, dtype=dt_int)
+ self._lines_ex_bus.copy_from_index(
+ self.current_topo, self.line_ex_pos_topo_vect
+ )
+ return self._lines_ex_bus
+
+ def get_lines_ex_bus_global(self):
+ tmp_ = self.get_lines_ex_bus()
+ return self._aux_to_global(tmp_, self.line_ex_to_subid)
+
+ def get_storages_bus(self):
+ if self._storage_bus is None:
+ self._storage_bus = ValueStore(self.n_storage, dtype=dt_int)
+ self._storage_bus.copy_from_index(self.current_topo, self.storage_pos_topo_vect)
+ return self._storage_bus
+
+ def get_storages_bus_global(self):
+ tmp_ = self.get_storages_bus()
+ return self._aux_to_global(tmp_, self.storage_to_subid)
+
+ def _get_active_bus(self):
+ self.activated_bus[:, :] = False
+ tmp = self.current_topo.values - 1
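+        # tmp is the 0-based busbar index; negative values mean the element is disconnected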
+ is_el_conn = tmp >= 0
+ self.activated_bus[self.big_topo_to_subid[is_el_conn], tmp[is_el_conn]] = True
+
+ def update_state(self, powerline_disconnected):
+ """
+ .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
+
+ Update the internal state. Should be called after the cascading failures
+
+ """
+ if (powerline_disconnected >= 0).any():
+ arr_ = np.zeros(powerline_disconnected.shape, dtype=dt_int)
+ arr_[powerline_disconnected >= 0] = -1
+ self.current_topo.set_status(
+ arr_,
+ self.line_or_pos_topo_vect,
+ self.line_ex_pos_topo_vect,
+ self.last_topo_registered,
+ )
+ self.last_topo_registered.update_connected(self.current_topo)
+ self.current_topo.reset()
diff --git a/grid2op/Action/ActionSpace.py b/grid2op/Action/actionSpace.py
similarity index 98%
rename from grid2op/Action/ActionSpace.py
rename to grid2op/Action/actionSpace.py
index 73b06e722..c7ade19a7 100644
--- a/grid2op/Action/ActionSpace.py
+++ b/grid2op/Action/actionSpace.py
@@ -9,8 +9,8 @@
import warnings
import copy
-from grid2op.Action.BaseAction import BaseAction
-from grid2op.Action.SerializableActionSpace import SerializableActionSpace
+from grid2op.Action.baseAction import BaseAction
+from grid2op.Action.serializableActionSpace import SerializableActionSpace
class ActionSpace(SerializableActionSpace):
diff --git a/grid2op/Action/BaseAction.py b/grid2op/Action/baseAction.py
similarity index 97%
rename from grid2op/Action/BaseAction.py
rename to grid2op/Action/baseAction.py
index 0c1c4ed9b..17c680e6c 100644
--- a/grid2op/Action/BaseAction.py
+++ b/grid2op/Action/baseAction.py
@@ -676,15 +676,15 @@ def as_serializable_dict(self) -> dict:
if type(self).shunts_data_available:
res["shunt"] = {}
- if np.any(np.isfinite(self.shunt_p)):
+ if np.isfinite(self.shunt_p).any():
res["shunt"]["shunt_p"] = [
(int(sh_id), float(val)) for sh_id, val in enumerate(self.shunt_p) if np.isfinite(val)
]
- if np.any(np.isfinite(self.shunt_q)):
+ if np.isfinite(self.shunt_q).any():
res["shunt"]["shunt_q"] = [
(int(sh_id), float(val)) for sh_id, val in enumerate(self.shunt_q) if np.isfinite(val)
]
- if np.any(self.shunt_bus != 0):
+ if (self.shunt_bus != 0).any():
res["shunt"]["shunt_bus"] = [
(int(sh_id), int(val))
for sh_id, val in enumerate(self.shunt_bus)
@@ -825,17 +825,17 @@ def _get_array_from_attr_name(self, attr_name):
def _post_process_from_vect(self):
self._modif_inj = self._dict_inj != {}
- self._modif_set_bus = np.any(self._set_topo_vect != 0)
- self._modif_change_bus = np.any(self._change_bus_vect)
- self._modif_set_status = np.any(self._set_line_status != 0)
- self._modif_change_status = np.any(self._switch_line_status)
- self._modif_redispatch = np.any(
+ self._modif_set_bus = (self._set_topo_vect != 0).any()
+ self._modif_change_bus = (self._change_bus_vect).any()
+ self._modif_set_status = (self._set_line_status != 0).any()
+ self._modif_change_status = (self._switch_line_status).any()
+ self._modif_redispatch = (
np.isfinite(self._redispatch) & (self._redispatch != 0.0)
- )
- self._modif_storage = np.any(self._storage_power != 0.0)
- self._modif_curtailment = np.any(self._curtail != -1.0)
- self._modif_alarm = np.any(self._raise_alarm)
- self._modif_alert = np.any(self._raise_alert)
+ ).any()
+ self._modif_storage = (self._storage_power != 0.0).any()
+ self._modif_curtailment = (self._curtail != -1.0).any()
+ self._modif_alarm = self._raise_alarm.any()
+ self._modif_alert = self._raise_alert.any()
def _assign_attr_from_name(self, attr_nm, vect):
if hasattr(self, attr_nm):
@@ -846,9 +846,8 @@ def _assign_attr_from_name(self, attr_nm, vect):
super()._assign_attr_from_name(attr_nm, vect)
self._post_process_from_vect()
else:
- if np.any(np.isfinite(vect)):
- if np.any(vect != 0.0):
- self._dict_inj[attr_nm] = vect
+ if np.isfinite(vect).any() and (vect != 0.0).any():
+ self._dict_inj[attr_nm] = vect
def check_space_legit(self):
"""
@@ -1029,17 +1028,17 @@ def __eq__(self, other) -> bool:
return False
is_ok_me = np.isfinite(self.shunt_p)
is_ok_ot = np.isfinite(other.shunt_p)
- if np.any(is_ok_me != is_ok_ot):
+ if (is_ok_me != is_ok_ot).any():
return False
- if not np.all(self.shunt_p[is_ok_me] == other.shunt_p[is_ok_ot]):
+ if not (self.shunt_p[is_ok_me] == other.shunt_p[is_ok_ot]).all():
return False
is_ok_me = np.isfinite(self.shunt_q)
is_ok_ot = np.isfinite(other.shunt_q)
- if np.any(is_ok_me != is_ok_ot):
+ if (is_ok_me != is_ok_ot).any():
return False
- if not np.all(self.shunt_q[is_ok_me] == other.shunt_q[is_ok_ot]):
+ if not (self.shunt_q[is_ok_me] == other.shunt_q[is_ok_ot]).all():
return False
- if not np.all(self.shunt_bus == other.shunt_bus):
+ if not (self.shunt_bus == other.shunt_bus).all():
return False
return True
@@ -1414,7 +1413,7 @@ def _assign_iadd_or_warn(self, attr_name, new_value):
old_is_finite = np.isfinite(old_value)
new_finite = new_value[new_is_finite | old_is_finite]
old_finite = old_value[new_is_finite | old_is_finite]
- if np.any(new_finite != old_finite):
+ if (new_finite != old_finite).any():
warnings.warn(
type(self).ERR_ACTION_CUT.format(attr_name)
)
@@ -1473,7 +1472,7 @@ def __iadd__(self, other):
)
# redispatching
redispatching = other._redispatch
- if np.any(redispatching != 0.0):
+ if (redispatching != 0.0).any():
if "_redispatch" not in self.attr_list_set:
warnings.warn(
type(self).ERR_ACTION_CUT.format("_redispatch")
@@ -1484,8 +1483,8 @@ def __iadd__(self, other):
# storage
set_storage = other._storage_power
- ok_ind = np.isfinite(set_storage) & np.any(set_storage != 0.0)
- if np.any(ok_ind):
+ ok_ind = np.isfinite(set_storage) & (set_storage != 0.0).any()
+ if ok_ind.any():
if "_storage_power" not in self.attr_list_set:
warnings.warn(
type(self).ERR_ACTION_CUT.format("_storage_power")
@@ -1496,7 +1495,7 @@ def __iadd__(self, other):
# curtailment
curtailment = other._curtail
ok_ind = np.isfinite(curtailment) & (curtailment != -1.0)
- if np.any(ok_ind):
+ if ok_ind.any():
if "_curtail" not in self.attr_list_set:
warnings.warn(
type(self).ERR_ACTION_CUT.format("_curtail")
@@ -2222,7 +2221,7 @@ def _check_for_correct_modif_flags(self):
)
if "injection" not in self.authorized_keys:
raise IllegalAction("You illegally act on the injection")
- if np.any(self._change_bus_vect):
+ if self._change_bus_vect.any():
if not self._modif_change_bus:
raise AmbiguousAction(
"A action of type change_bus is performed while the appropriate flag is not "
@@ -2231,7 +2230,7 @@ def _check_for_correct_modif_flags(self):
)
if "change_bus" not in self.authorized_keys:
raise IllegalAction("You illegally act on the bus (using change)")
- if np.any(self._set_topo_vect != 0):
+ if (self._set_topo_vect != 0).any():
if not self._modif_set_bus:
raise AmbiguousAction(
"A action of type set_bus is performed while the appropriate flag is not "
@@ -2241,7 +2240,7 @@ def _check_for_correct_modif_flags(self):
if "set_bus" not in self.authorized_keys:
raise IllegalAction("You illegally act on the bus (using set)")
- if np.any(self._set_line_status != 0):
+ if (self._set_line_status != 0).any():
if not self._modif_set_status:
raise AmbiguousAction(
"A action of type line_set_status is performed while the appropriate flag is not "
@@ -2254,7 +2253,7 @@ def _check_for_correct_modif_flags(self):
"You illegally act on the powerline status (using set)"
)
- if np.any(self._switch_line_status):
+ if (self._switch_line_status).any():
if not self._modif_change_status:
raise AmbiguousAction(
"A action of type line_change_status is performed while the appropriate flag "
@@ -2267,7 +2266,7 @@ def _check_for_correct_modif_flags(self):
"You illegally act on the powerline status (using change)"
)
- if np.any(self._redispatch != 0.0):
+ if (self._redispatch != 0.0).any():
if not self._modif_redispatch:
raise AmbiguousAction(
"A action of type redispatch is performed while the appropriate flag "
@@ -2278,7 +2277,7 @@ def _check_for_correct_modif_flags(self):
if "redispatch" not in self.authorized_keys:
raise IllegalAction("You illegally act on the redispatching")
- if np.any(self._storage_power != 0.0):
+ if (self._storage_power != 0.0).any():
if not self._modif_storage:
raise AmbiguousAction(
"A action on the storage unit is performed while the appropriate flag "
@@ -2289,7 +2288,7 @@ def _check_for_correct_modif_flags(self):
if "set_storage" not in self.authorized_keys:
raise IllegalAction("You illegally act on the storage unit")
- if np.any(self._curtail != -1.0):
+ if (self._curtail != -1.0).any():
if not self._modif_curtailment:
raise AmbiguousAction(
"A curtailment is performed while the action is not supposed to have done so. "
@@ -2298,7 +2297,7 @@ def _check_for_correct_modif_flags(self):
if "curtail" not in self.authorized_keys:
raise IllegalAction("You illegally act on the curtailment")
- if np.any(self._raise_alarm):
+ if (self._raise_alarm).any():
if not self._modif_alarm:
raise AmbiguousAction(
"Incorrect way to raise some alarm, the appropriate flag is not "
@@ -2307,7 +2306,7 @@ def _check_for_correct_modif_flags(self):
if "raise_alarm" not in self.authorized_keys:
raise IllegalAction("You illegally send an alarm.")
- if np.any(self._raise_alert):
+ if (self._raise_alert).any():
if not self._modif_alert:
raise AmbiguousActionRaiseAlert(
"Incorrect way to raise some alert, the appropriate flag is not "
@@ -2374,7 +2373,7 @@ def _check_for_ambiguity(self):
if (
self._modif_change_status
and self._modif_set_status
- and np.any(self._set_line_status[self._switch_line_status] != 0)
+ and (self._set_line_status[self._switch_line_status] != 0).any()
):
raise InvalidLineStatus(
"You asked to change the status (connected / disconnected) of a powerline by"
@@ -2446,17 +2445,17 @@ def _check_for_ambiguity(self):
"environment. Please set up the proper costs for generator"
)
- if np.any(self._redispatch[~self.gen_redispatchable] != 0.0):
+ if (self._redispatch[~self.gen_redispatchable] != 0.0).any():
raise InvalidRedispatching(
"Trying to apply a redispatching action on a non redispatchable generator"
)
if self._single_act:
- if np.any(self._redispatch > self.gen_max_ramp_up):
+ if (self._redispatch > self.gen_max_ramp_up).any():
raise InvalidRedispatching(
"Some redispatching amount are above the maximum ramp up"
)
- if np.any(-self._redispatch > self.gen_max_ramp_down):
+ if (-self._redispatch > self.gen_max_ramp_down).any():
raise InvalidRedispatching(
"Some redispatching amount are bellow the maximum ramp down"
)
@@ -2465,12 +2464,12 @@ def _check_for_ambiguity(self):
new_p = self._dict_inj["prod_p"]
tmp_p = new_p + self._redispatch
indx_ok = np.isfinite(new_p)
- if np.any(tmp_p[indx_ok] > self.gen_pmax[indx_ok]):
+ if (tmp_p[indx_ok] > self.gen_pmax[indx_ok]).any():
raise InvalidRedispatching(
"Some redispatching amount, cumulated with the production setpoint, "
"are above pmax for some generator."
)
- if np.any(tmp_p[indx_ok] < self.gen_pmin[indx_ok]):
+ if (tmp_p[indx_ok] < self.gen_pmin[indx_ok]).any():
raise InvalidRedispatching(
"Some redispatching amount, cumulated with the production setpoint, "
"are below pmin for some generator."
@@ -2486,20 +2485,20 @@ def _check_for_ambiguity(self):
if (
self._modif_set_bus
and self._modif_change_bus
- and np.any(self._set_topo_vect[self._change_bus_vect] != 0)
+ and (self._set_topo_vect[self._change_bus_vect] != 0).any()
):
raise InvalidBusStatus(
"You asked to change the bus of an object with"
' using the keyword "change_bus" and set this same object state in "set_bus"'
". This ambiguous behaviour is not supported"
)
- if self._modif_set_bus and np.any(self._set_topo_vect < -1):
+ if self._modif_set_bus and (self._set_topo_vect < -1).any():
raise InvalidBusStatus(
"Invalid set_bus. Buses should be either -1 (disconnect), 0 (change nothing),"
"1 (assign this object to bus one) or 2 (assign this object to bus"
"2). A negative number has been found."
)
- if self._modif_set_bus and np.any(self._set_topo_vect > 2):
+ if self._modif_set_bus and (self._set_topo_vect > 2).any():
raise InvalidBusStatus(
"Invalid set_bus. Buses should be either -1 (disconnect), 0 (change nothing),"
"1 (assign this object to bus one) or 2 (assign this object to bus"
@@ -2526,13 +2525,13 @@ def _check_for_ambiguity(self):
if self._modif_set_bus:
disco_or = self._set_topo_vect[self.line_or_pos_topo_vect] == -1
- if np.any(self._set_topo_vect[self.line_ex_pos_topo_vect][disco_or] > 0):
+ if (self._set_topo_vect[self.line_ex_pos_topo_vect][disco_or] > 0).any():
raise InvalidLineStatus(
"A powerline is connected (set to a bus at extremity end) and "
"disconnected (set to bus -1 at origin end)"
)
disco_ex = self._set_topo_vect[self.line_ex_pos_topo_vect] == -1
- if np.any(self._set_topo_vect[self.line_or_pos_topo_vect][disco_ex] > 0):
+ if (self._set_topo_vect[self.line_or_pos_topo_vect][disco_ex] > 0).any():
raise InvalidLineStatus(
"A powerline is connected (set to a bus at origin end) and "
"disconnected (set to bus -1 at extremity end)"
@@ -2551,17 +2550,17 @@ def _check_for_ambiguity(self):
raise AmbiguousAction(
'Action of type "set_bus" are not supported by this action type'
)
- if np.any(
+ if (
self._set_topo_vect[self.line_or_pos_topo_vect[id_disc]] > 0
- ) or np.any(self._set_topo_vect[self.line_ex_pos_topo_vect[id_disc]] > 0):
+ ).any() or (self._set_topo_vect[self.line_ex_pos_topo_vect[id_disc]] > 0).any():
raise InvalidLineStatus(
"You ask to disconnect a powerline but also to connect it "
"to a certain bus."
)
- if np.any(
+ if (
self._set_topo_vect[self.line_or_pos_topo_vect[id_reco]] == -1
- ) or np.any(self._set_topo_vect[self.line_ex_pos_topo_vect[id_reco]] == -1):
+ ).any() or (self._set_topo_vect[self.line_ex_pos_topo_vect[id_reco]] == -1).any():
raise InvalidLineStatus(
"You ask to reconnect a powerline but also to disconnect it "
"from a certain bus."
@@ -2571,27 +2570,27 @@ def _check_for_ambiguity(self):
raise AmbiguousAction(
'Action of type "change_bus" are not supported by this action type'
)
- if np.any(
+ if (
self._change_bus_vect[self.line_or_pos_topo_vect[id_disc]] > 0
- ) or np.any(self._change_bus_vect[self.line_ex_pos_topo_vect[id_disc]] > 0):
+ ).any() or (self._change_bus_vect[self.line_ex_pos_topo_vect[id_disc]] > 0).any():
raise InvalidLineStatus(
"You ask to disconnect a powerline but also to change its bus."
)
- if np.any(
+ if (
self._change_bus_vect[
self.line_or_pos_topo_vect[self._set_line_status == 1]
]
- ):
+ ).any():
raise InvalidLineStatus(
"You ask to connect an origin powerline but also to *change* the bus to which "
"it is connected. This is ambiguous. You must *set* this bus instead."
)
- if np.any(
+ if (
self._change_bus_vect[
self.line_ex_pos_topo_vect[self._set_line_status == 1]
]
- ):
+ ).any():
raise InvalidLineStatus(
"You ask to connect an extremity powerline but also to *change* the bus to "
"which it is connected. This is ambiguous. You must *set* this bus instead."
@@ -2642,7 +2641,7 @@ def _check_for_ambiguity(self):
f"{self.dim_alarms}"
)
else:
- if np.any(self._raise_alarm):
+ if self._raise_alarm.any():
raise AmbiguousAction(
f"Unrecognize alarm action: an action acts on the alarm, yet it's not tagged "
f"as doing so. Expect wrong behaviour."
@@ -2655,10 +2654,10 @@ def _check_for_ambiguity(self):
f"{self.dim_alerts}"
)
else:
- if np.any(self._raise_alert):
+ if self._raise_alert.any():
raise AmbiguousActionRaiseAlert(
- f"Unrecognize alert action: an action acts on the alert, yet it's not tagged "
- f"as doing so. Expect wrong behaviour."
+ "Unrecognize alert action: an action acts on the alert, yet it's not tagged "
+ "as doing so. Expect wrong behaviour."
)
def _is_storage_ambiguous(self):
@@ -2677,13 +2676,13 @@ def _is_storage_ambiguous(self):
"self._storage_power.shape[0] != self.n_storage: wrong number of storage "
"units affected"
)
- if np.any(self._storage_power < -self.storage_max_p_prod):
+ if (self._storage_power < -self.storage_max_p_prod).any():
where_bug = np.where(self._storage_power < -self.storage_max_p_prod)[0]
raise InvalidStorage(
f"you asked a storage unit to absorb more than what it can: "
f"self._storage_power[{where_bug}] < -self.storage_max_p_prod[{where_bug}]."
)
- if np.any(self._storage_power > self.storage_max_p_absorb):
+ if (self._storage_power > self.storage_max_p_absorb).any():
where_bug = np.where(self._storage_power > self.storage_max_p_absorb)[0]
raise InvalidStorage(
f"you asked a storage unit to produce more than what it can: "
@@ -2691,9 +2690,9 @@ def _is_storage_ambiguous(self):
)
if "_storage_power" not in self.attr_list_set:
- if np.any(self._set_topo_vect[self.storage_pos_topo_vect] > 0):
+ if (self._set_topo_vect[self.storage_pos_topo_vect] > 0).any():
raise InvalidStorage("Attempt to modify bus (set) of a storage unit")
- if np.any(self._change_bus_vect[self.storage_pos_topo_vect]):
+ if (self._change_bus_vect[self.storage_pos_topo_vect]).any():
raise InvalidStorage("Attempt to modify bus (change) of a storage unit")
def _is_curtailment_ambiguous(self):
@@ -2717,21 +2716,21 @@ def _is_curtailment_ambiguous(self):
"units affected"
)
- if np.any((self._curtail < 0.0) & (self._curtail != -1.0)):
+ if ((self._curtail < 0.0) & (self._curtail != -1.0)).any():
where_bug = np.where((self._curtail < 0.0) & (self._curtail != -1.0))[0]
raise InvalidCurtailment(
f"you asked to perform a negative curtailment: "
f"self._curtail[{where_bug}] < 0. "
f"Curtailment should be a real number between 0.0 and 1.0"
)
- if np.any(self._curtail > 1.0):
+ if (self._curtail > 1.0).any():
where_bug = np.where(self._curtail > 1.0)[0]
raise InvalidCurtailment(
f"you asked a storage unit to produce more than what it can: "
f"self._curtail[{where_bug}] > 1. "
f"Curtailment should be a real number between 0.0 and 1.0"
)
- if np.any(self._curtail[~self.gen_renewable] != -1.0):
+ if (self._curtail[~self.gen_renewable] != -1.0).any():
raise InvalidCurtailment(
"Trying to apply a curtailment on a non renewable generator"
)
@@ -2955,7 +2954,7 @@ def __str__(self) -> str:
else:
line_str = "s: \n\t \t - " + "\n\t \t - ".join(
[f": {i} (on line {l})" for i,l in zip(i_alert,li_line)])
- res.append(f"\t - Raise alert(s) " f"{line_str}")
+ res.append(f"\t - Raise alert(s) {line_str}")
else:
res.append("\t - Not raise any alert")
return "\n".join(res)
@@ -2988,32 +2987,32 @@ def impact_on_objects(self) -> dict:
"reconnections": {"count": 0, "powerlines": []},
"disconnections": {"count": 0, "powerlines": []},
}
- if np.any(self._set_line_status == 1):
+ if (self._set_line_status == 1).any():
force_line_status["changed"] = True
has_impact = True
- force_line_status["reconnections"]["count"] = np.sum(
+ force_line_status["reconnections"]["count"] = (
self._set_line_status == 1
- )
+ ).sum()
force_line_status["reconnections"]["powerlines"] = np.where(
self._set_line_status == 1
)[0]
- if np.any(self._set_line_status == -1):
+ if (self._set_line_status == -1).any():
force_line_status["changed"] = True
has_impact = True
- force_line_status["disconnections"]["count"] = np.sum(
+ force_line_status["disconnections"]["count"] = (
self._set_line_status == -1
- )
+ ).sum()
force_line_status["disconnections"]["powerlines"] = np.where(
self._set_line_status == -1
)[0]
# handles action on switch line status
switch_line_status = {"changed": False, "count": 0, "powerlines": []}
- if np.sum(self._switch_line_status):
+ if self._switch_line_status.sum():
switch_line_status["changed"] = True
has_impact = True
- switch_line_status["count"] = np.sum(self._switch_line_status)
+ switch_line_status["count"] = self._switch_line_status.sum()
switch_line_status["powerlines"] = np.where(self._switch_line_status)[0]
topology = {
@@ -3023,7 +3022,7 @@ def impact_on_objects(self) -> dict:
"disconnect_bus": [],
}
# handles topology
- if np.any(self._change_bus_vect):
+ if self._change_bus_vect.any():
for id_, k in enumerate(self._change_bus_vect):
if k:
obj_id, objt_type, substation_id = self._obj_caract_from_topo_id(
@@ -3040,7 +3039,7 @@ def impact_on_objects(self) -> dict:
topology["changed"] = True
has_impact = True
- if np.any(self._set_topo_vect != 0):
+ if (self._set_topo_vect != 0).any():
for id_, k in enumerate(self._set_topo_vect):
if k > 0:
obj_id, objt_type, substation_id = self._obj_caract_from_topo_id(
@@ -3072,7 +3071,7 @@ def impact_on_objects(self) -> dict:
# handle redispatching
redispatch = {"changed": False, "generators": []}
- if np.any(self._redispatch != 0.0):
+ if (self._redispatch != 0.0).any():
for gen_idx in range(self.n_gen):
if self._redispatch[gen_idx] != 0.0:
gen_name = self.name_gen[gen_idx]
@@ -3198,12 +3197,12 @@ def as_dict(self) -> dict:
res[k] = 1.0 * self._dict_inj[k]
# handles actions on force line status
- if np.any(self._set_line_status != 0):
+ if (self._set_line_status != 0).any():
res["set_line_status"] = {}
- res["set_line_status"]["nb_connected"] = np.sum(self._set_line_status == 1)
- res["set_line_status"]["nb_disconnected"] = np.sum(
+ res["set_line_status"]["nb_connected"] = (self._set_line_status == 1).sum()
+ res["set_line_status"]["nb_disconnected"] = (
self._set_line_status == -1
- )
+ ).sum()
res["set_line_status"]["connected_id"] = np.where(
self._set_line_status == 1
)[0]
@@ -3212,17 +3211,17 @@ def as_dict(self) -> dict:
)[0]
# handles action on switch line status
- if np.sum(self._switch_line_status):
+ if self._switch_line_status.sum():
res["change_line_status"] = {}
- res["change_line_status"]["nb_changed"] = np.sum(self._switch_line_status)
+ res["change_line_status"]["nb_changed"] = self._switch_line_status.sum()
res["change_line_status"]["changed_id"] = np.where(
self._switch_line_status
)[0]
# handles topology change
- if np.any(self._change_bus_vect):
+ if (self._change_bus_vect).any():
res["change_bus_vect"] = {}
- res["change_bus_vect"]["nb_modif_objects"] = np.sum(self._change_bus_vect)
+ res["change_bus_vect"]["nb_modif_objects"] = self._change_bus_vect.sum()
all_subs = set()
for id_, k in enumerate(self._change_bus_vect):
if k:
@@ -3241,9 +3240,9 @@ def as_dict(self) -> dict:
res["change_bus_vect"]["modif_subs_id"] = sorted(all_subs)
# handles topology set
- if np.any(self._set_topo_vect):
+ if (self._set_topo_vect != 0).any():
res["set_bus_vect"] = {}
- res["set_bus_vect"]["nb_modif_objects"] = np.sum(self._set_topo_vect)
+ res["set_bus_vect"]["nb_modif_objects"] = (self._set_topo_vect != 0).sum()
all_subs = set()
for id_, k in enumerate(self._set_topo_vect):
if k != 0:
@@ -3262,15 +3261,15 @@ def as_dict(self) -> dict:
res["set_bus_vect"]["nb_modif_subs"] = len(all_subs)
res["set_bus_vect"]["modif_subs_id"] = sorted(all_subs)
- if np.any(self._hazards):
+ if self._hazards.any():
res["hazards"] = np.where(self._hazards)[0]
- res["nb_hazards"] = np.sum(self._hazards)
+ res["nb_hazards"] = self._hazards.sum()
- if np.any(self._maintenance):
+ if self._maintenance.any():
res["maintenance"] = np.where(self._maintenance)[0]
- res["nb_maintenance"] = np.sum(self._maintenance)
+ res["nb_maintenance"] = self._maintenance.sum()
- if np.any(self._redispatch != 0.0):
+ if (self._redispatch != 0.0).any():
res["redispatch"] = 1.0 * self._redispatch
if self._modif_storage:
@@ -3328,14 +3327,14 @@ def get_types(self) -> Tuple[bool, bool, bool, bool, bool, bool, bool]:
injection = "load_p" in self._dict_inj or "prod_p" in self._dict_inj
voltage = "prod_v" in self._dict_inj
if self.shunts_data_available:
- voltage = voltage or np.any(np.isfinite(self.shunt_p))
- voltage = voltage or np.any(np.isfinite(self.shunt_q))
- voltage = voltage or np.any(self.shunt_bus != 0)
+ voltage = voltage or np.isfinite(self.shunt_p).any()
+ voltage = voltage or np.isfinite(self.shunt_q).any()
+ voltage = voltage or (self.shunt_bus != 0).any()
lines_impacted, subs_impacted = self.get_topological_impact()
- topology = np.any(subs_impacted)
- line = np.any(lines_impacted)
- redispatching = np.any(self._redispatch != 0.0)
+ topology = subs_impacted.any()
+ line = lines_impacted.any()
+ redispatching = (self._redispatch != 0.0).any()
storage = self._modif_storage
curtailment = self._modif_curtailment
return injection, voltage, topology, line, redispatching, storage, curtailment
@@ -3427,7 +3426,7 @@ def _aux_effect_on_substation(self, substation_id):
raise Grid2OpException(f"`substation_id` should be positive.")
res = {}
- beg_ = int(np.sum(self.sub_info[:substation_id]))
+ beg_ = int(self.sub_info[:substation_id].sum())
end_ = int(beg_ + self.sub_info[substation_id])
res["change_bus"] = self._change_bus_vect[beg_:end_]
res["set_bus"] = self._set_topo_vect[beg_:end_]
@@ -3762,11 +3761,11 @@ def _aux_affect_object_int(
raise IllegalAction(
f'{name_el}_id should be convertible to integer. Error was : "{exc_}"'
)
- if np.any(values < min_val):
+ if (values < min_val).any():
raise IllegalAction(
f"new_bus should be between {min_val} and {max_val}, found a value < {min_val}"
)
- if np.any(values > max_val):
+ if (values > max_val).any():
raise IllegalAction(
f"new_bus should be between {min_val} and {max_val}, found a value > {max_val}"
)
@@ -4391,11 +4390,11 @@ def _aux_affect_object_bool(
raise IllegalAction(
f'{name_el}_id should be convertible to integer. Error was : "{exc_}"'
)
- if np.any(values < 0):
+ if (values < 0).any():
raise IllegalAction(
f"Impossible to change a negative {name_el} with negative id"
)
- if np.any(values > nb_els):
+ if (values > nb_els).any():
raise IllegalAction(
f"Impossible to change a {name_el} id because there are only "
f"{nb_els} on the grid and you wanted to change an element with an "
@@ -5391,12 +5390,12 @@ def _aux_aux_convert_and_check_np_array(self, array_):
"of float."
)
array_ = array_.astype(dt_int)
- if np.any(array_ < -1):
+ if (array_ < -1).any():
raise IllegalAction(
f"Impossible to set element to bus {np.min(array_)}. Buses must be "
f"-1, 0, 1 or 2."
)
- if np.any(array_ > 2):
+ if (array_ > 2).any():
raise IllegalAction(
f"Impossible to set element to bus {np.max(array_)}. Buses must be "
f"-1, 0, 1 or 2."
@@ -5434,7 +5433,7 @@ def _aux_set_bus_sub(self, values):
# should be a tuple (sub_id, new_topo)
sub_id, topo_repr, nb_el = self._check_for_right_vectors_sub(values)
topo_repr = self._aux_aux_convert_and_check_np_array(topo_repr)
- start_ = np.sum(self.sub_info[:sub_id])
+ start_ = self.sub_info[:sub_id].sum()
end_ = start_ + nb_el
self._set_topo_vect[start_:end_] = topo_repr
elif isinstance(values, list):
@@ -5582,7 +5581,7 @@ def _aux_change_bus_sub(self, values):
sub_id, topo_repr, nb_el = self._check_for_right_vectors_sub(values)
topo_repr = self._aux_aux_convert_and_check_np_array_change(topo_repr)
- start_ = np.sum(self.sub_info[:sub_id])
+ start_ = self.sub_info[:sub_id].sum()
end_ = start_ + nb_el
self._change_bus_vect[start_:end_] = topo_repr
elif isinstance(values, list):
@@ -5813,12 +5812,12 @@ def limit_curtail_storage(self,
res_add_storage = np.zeros(cls.n_storage, dtype=dt_float)
res_add_curtailed = np.zeros(cls.n_gen, dtype=dt_float)
- max_down = np.sum(obs.gen_margin_down)
- max_up = np.sum(obs.gen_margin_up)
+ max_down = obs.gen_margin_down.sum()
+ max_up = obs.gen_margin_up.sum()
# storage
- total_mw_storage = np.sum(res._storage_power)
- total_storage_consumed = np.sum(res._storage_power)
+ total_mw_storage = res._storage_power.sum()
+ total_storage_consumed = res._storage_power.sum()
# curtailment
gen_curtailed = (res._curtail != -1) & cls.gen_renewable
@@ -5836,8 +5835,8 @@ def limit_curtail_storage(self,
mw_curtailed_down[mw_curtailed_down < 0.] = 0.
mw_curtailed_up = -1.0 * mw_curtailed
mw_curtailed_up[mw_curtailed_up < 0.] = 0.
- total_mw_curtailed_down = np.sum(mw_curtailed_down)
- total_mw_curtailed_up = np.sum(mw_curtailed_up)
+ total_mw_curtailed_down = mw_curtailed_down.sum()
+ total_mw_curtailed_up = mw_curtailed_up.sum()
total_mw_curtailed = total_mw_curtailed_down - total_mw_curtailed_up
total_mw_act = total_mw_curtailed + total_mw_storage
@@ -5866,7 +5865,7 @@ def limit_curtail_storage(self,
do_storage_consum = res._storage_power > 0.
remove_storage_mw = remove_mw * total_mw_storage / (total_mw_curtailed_down + total_mw_storage)
tmp_ = -(res._storage_power[do_storage_consum] *
- remove_storage_mw / np.sum(res._storage_power[do_storage_consum]))
+ remove_storage_mw / res._storage_power[do_storage_consum].sum())
res._storage_power[do_storage_consum] += tmp_
res_add_storage[do_storage_consum] = tmp_
@@ -5894,7 +5893,7 @@ def limit_curtail_storage(self,
do_storage_prod = res._storage_power < 0.
remove_storage_mw = add_mw * total_mw_storage / (total_mw_curtailed_up + total_mw_storage)
tmp_ = (res._storage_power[do_storage_prod] *
- remove_storage_mw / np.sum(res._storage_power[do_storage_prod]))
+ remove_storage_mw / res._storage_power[do_storage_prod].sum())
res._storage_power[do_storage_prod] += tmp_
res_add_storage[do_storage_prod] = tmp_
return res, res_add_curtailed, res_add_storage
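Nearly every hunk in this file swaps `np.any(arr)` / `np.sum(arr)` for `arr.any()` / `arr.sum()`. The function forms pay NumPy's array-function dispatch overhead (NEP 18) on every call, which dominates on the many tiny vectors a `BaseAction` carries. A quick, hedged way to observe the difference (absolute numbers are machine-dependent):

```python
import timeit
import numpy as np

arr = np.zeros(20, dtype=bool)  # typical size of one action sub-vector

t_func = timeit.timeit(lambda: np.any(arr), number=100_000)
t_meth = timeit.timeit(lambda: arr.any(), number=100_000)
print(f"np.any(arr): {t_func:.3f}s  arr.any(): {t_meth:.3f}s")
# the method call skips the dispatch machinery and is usually
# noticeably faster per call on small arrays
```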
diff --git a/grid2op/Action/CompleteAction.py b/grid2op/Action/completeAction.py
similarity index 94%
rename from grid2op/Action/CompleteAction.py
rename to grid2op/Action/completeAction.py
index d4e12ec54..548b59009 100644
--- a/grid2op/Action/CompleteAction.py
+++ b/grid2op/Action/completeAction.py
@@ -5,7 +5,7 @@
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
-from grid2op.Action.BaseAction import BaseAction
+from grid2op.Action.baseAction import BaseAction
class CompleteAction(BaseAction):
diff --git a/grid2op/Action/DispatchAction.py b/grid2op/Action/dispatchAction.py
similarity index 94%
rename from grid2op/Action/DispatchAction.py
rename to grid2op/Action/dispatchAction.py
index 9c4f7d6f7..b0ec07fc9 100644
--- a/grid2op/Action/DispatchAction.py
+++ b/grid2op/Action/dispatchAction.py
@@ -6,7 +6,7 @@
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
-from grid2op.Action.PlayableAction import PlayableAction
+from grid2op.Action.playableAction import PlayableAction
class DispatchAction(PlayableAction):
diff --git a/grid2op/Action/DontAct.py b/grid2op/Action/dontAct.py
similarity index 96%
rename from grid2op/Action/DontAct.py
rename to grid2op/Action/dontAct.py
index afe2c622c..09fd0d1c1 100644
--- a/grid2op/Action/DontAct.py
+++ b/grid2op/Action/dontAct.py
@@ -6,7 +6,7 @@
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
-from grid2op.Action.PlayableAction import PlayableAction
+from grid2op.Action.playableAction import PlayableAction
class DontAct(PlayableAction):
diff --git a/grid2op/Action/PlayableAction.py b/grid2op/Action/playableAction.py
similarity index 98%
rename from grid2op/Action/PlayableAction.py
rename to grid2op/Action/playableAction.py
index 61f3dfccb..fd854863e 100644
--- a/grid2op/Action/PlayableAction.py
+++ b/grid2op/Action/playableAction.py
@@ -9,7 +9,7 @@
import warnings
from grid2op.Exceptions import AmbiguousAction
-from grid2op.Action.BaseAction import BaseAction
+from grid2op.Action.baseAction import BaseAction
class PlayableAction(BaseAction):
diff --git a/grid2op/Action/PowerlineChangeAction.py b/grid2op/Action/powerlineChangeAction.py
similarity index 94%
rename from grid2op/Action/PowerlineChangeAction.py
rename to grid2op/Action/powerlineChangeAction.py
index 6c00bc759..e678d6a03 100644
--- a/grid2op/Action/PowerlineChangeAction.py
+++ b/grid2op/Action/powerlineChangeAction.py
@@ -6,7 +6,7 @@
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
-from grid2op.Action.PlayableAction import PlayableAction
+from grid2op.Action.playableAction import PlayableAction
class PowerlineChangeAction(PlayableAction):
diff --git a/grid2op/Action/PowerlineChangeAndDispatchAction.py b/grid2op/Action/powerlineChangeAndDispatchAction.py
similarity index 94%
rename from grid2op/Action/PowerlineChangeAndDispatchAction.py
rename to grid2op/Action/powerlineChangeAndDispatchAction.py
index 3cd684dd4..759d241e0 100644
--- a/grid2op/Action/PowerlineChangeAndDispatchAction.py
+++ b/grid2op/Action/powerlineChangeAndDispatchAction.py
@@ -6,7 +6,7 @@
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
-from grid2op.Action.PlayableAction import PlayableAction
+from grid2op.Action.playableAction import PlayableAction
class PowerlineChangeAndDispatchAction(PlayableAction):
diff --git a/grid2op/Action/PowerlineChangeDispatchAndStorageAction.py b/grid2op/Action/powerlineChangeDispatchAndStorageAction.py
similarity index 93%
rename from grid2op/Action/PowerlineChangeDispatchAndStorageAction.py
rename to grid2op/Action/powerlineChangeDispatchAndStorageAction.py
index 4049b0656..7a0dfa0d0 100644
--- a/grid2op/Action/PowerlineChangeDispatchAndStorageAction.py
+++ b/grid2op/Action/powerlineChangeDispatchAndStorageAction.py
@@ -6,7 +6,7 @@
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
-from grid2op.Action.PlayableAction import PlayableAction
+from grid2op.Action.playableAction import PlayableAction
class PowerlineChangeDispatchAndStorageAction(PlayableAction):
diff --git a/grid2op/Action/PowerlineSetAction.py b/grid2op/Action/powerlineSetAction.py
similarity index 94%
rename from grid2op/Action/PowerlineSetAction.py
rename to grid2op/Action/powerlineSetAction.py
index a506557e9..81c6b67b9 100644
--- a/grid2op/Action/PowerlineSetAction.py
+++ b/grid2op/Action/powerlineSetAction.py
@@ -6,7 +6,7 @@
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
-from grid2op.Action.PlayableAction import PlayableAction
+from grid2op.Action.playableAction import PlayableAction
class PowerlineSetAction(PlayableAction):
diff --git a/grid2op/Action/PowerlineSetAndDispatchAction.py b/grid2op/Action/powerlineSetAndDispatchAction.py
similarity index 95%
rename from grid2op/Action/PowerlineSetAndDispatchAction.py
rename to grid2op/Action/powerlineSetAndDispatchAction.py
index b29e24193..97920d65a 100644
--- a/grid2op/Action/PowerlineSetAndDispatchAction.py
+++ b/grid2op/Action/powerlineSetAndDispatchAction.py
@@ -6,7 +6,7 @@
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
-from grid2op.Action.PlayableAction import PlayableAction
+from grid2op.Action.playableAction import PlayableAction
class PowerlineSetAndDispatchAction(PlayableAction):
diff --git a/grid2op/Action/SerializableActionSpace.py b/grid2op/Action/serializableActionSpace.py
similarity index 99%
rename from grid2op/Action/SerializableActionSpace.py
rename to grid2op/Action/serializableActionSpace.py
index 056506ac4..7c11d2312 100644
--- a/grid2op/Action/SerializableActionSpace.py
+++ b/grid2op/Action/serializableActionSpace.py
@@ -14,7 +14,7 @@
from grid2op.dtypes import dt_int, dt_float, dt_bool
from grid2op.Exceptions import AmbiguousAction, Grid2OpException
from grid2op.Space import SerializableSpace
-from grid2op.Action.BaseAction import BaseAction
+from grid2op.Action.baseAction import BaseAction
class SerializableActionSpace(SerializableSpace):
@@ -1092,15 +1092,15 @@ def get_all_unitary_topologies_set(action_space, sub_id=None):
dt_bool
) # add a zero to first element -> break symmetry
indx[tup] = True
- if np.sum(indx) >= 2 and np.sum(~indx) >= 2:
+ if indx.sum() >= 2 and (~indx).sum() >= 2:
# i need 2 elements on each bus at least (almost all the times, except when a powerline
# is alone on its bus)
new_topo = np.full(shape=num_el, fill_value=1, dtype=dt_int)
new_topo[~indx] = 2
if (
- np.sum(indx[powerlines_id]) == 0
- or np.sum(~indx[powerlines_id]) == 0
+ indx[powerlines_id].sum() == 0
+ or (~indx[powerlines_id]).sum() == 0
):
# if there is a "node" without a powerline, the topology is not valid
continue
@@ -1112,8 +1112,8 @@ def get_all_unitary_topologies_set(action_space, sub_id=None):
else:
# i need to take into account the case where 1 powerline is alone on a bus too
if (
- np.sum(indx[powerlines_id]) >= 1
- and np.sum(~indx[powerlines_id]) >= 1
+ (indx[powerlines_id]).sum() >= 1
+ and (~indx[powerlines_id]).sum() >= 1
):
new_topo = np.full(shape=num_el, fill_value=1, dtype=dt_int)
new_topo[~indx] = 2
@@ -1317,7 +1317,7 @@ def _custom_deepcopy_for_copy(self, new_obj):
def _aux_get_back_to_ref_state_curtail(self, res, obs):
is_curtailed = obs.curtailment_limit != 1.0
- if np.any(is_curtailed):
+ if is_curtailed.any():
res["curtailment"] = []
if not self.supports_type("curtail"):
warnings.warn(
@@ -1333,7 +1333,7 @@ def _aux_get_back_to_ref_state_curtail(self, res, obs):
def _aux_get_back_to_ref_state_line(self, res, obs):
disc_lines = ~obs.line_status
- if np.any(disc_lines):
+ if disc_lines.any():
li_disc = np.where(disc_lines)[0]
res["powerline"] = []
for el in li_disc:
@@ -1351,7 +1351,7 @@ def _aux_get_back_to_ref_state_line(self, res, obs):
def _aux_get_back_to_ref_state_sub(self, res, obs):
not_on_bus_1 = obs.topo_vect > 1 # disconnected lines are handled above
- if np.any(not_on_bus_1):
+ if not_on_bus_1.any():
res["substation"] = []
subs_changed = type(self).grid_objects_types[
not_on_bus_1, type(self).SUB_COL
@@ -1377,7 +1377,7 @@ def _aux_get_back_to_ref_state_sub(self, res, obs):
def _aux_get_back_to_ref_state_redisp(self, res, obs, precision=1e-5):
# TODO this is ugly, probably slow and could definitely be optimized
notredisp_setpoint = obs.target_dispatch != 0.0
- if np.any(notredisp_setpoint):
+ if notredisp_setpoint.any():
need_redisp = np.where(notredisp_setpoint)[0]
res["redispatching"] = []
# combine generators and do not exceed ramps (up or down)
@@ -1442,7 +1442,7 @@ def _aux_get_back_to_ref_state_storage(
# TODO refacto with the redispatching
notredisp_setpoint = obs.storage_charge / obs.storage_Emax != storage_setpoint
delta_time_hour = dt_float(obs.delta_time / 60.0)
- if np.any(notredisp_setpoint):
+ if notredisp_setpoint.any():
need_ajust = np.where(notredisp_setpoint)[0]
res["storage"] = []
# combine storage units and do not exceed maximum power
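The enumeration patched above keeps a two-bus split of a substation only when each bus retains at least two elements, or at least one powerline when an element would otherwise sit alone. A hedged usage sketch of the static method itself; "l2rpn_case14_sandbox" is only an example environment:

```python
import grid2op
from grid2op.Action import SerializableActionSpace

env = grid2op.make("l2rpn_case14_sandbox", test=True)
# enumerate every valid unitary "set_bus" reconfiguration of substation 1
acts = SerializableActionSpace.get_all_unitary_topologies_set(
    env.action_space, sub_id=1
)
print(f"substation 1 admits {len(acts)} unitary set_bus actions")
```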
diff --git a/grid2op/Action/TopologyAction.py b/grid2op/Action/topologyAction.py
similarity index 95%
rename from grid2op/Action/TopologyAction.py
rename to grid2op/Action/topologyAction.py
index 1d52503d1..4fadb649d 100644
--- a/grid2op/Action/TopologyAction.py
+++ b/grid2op/Action/topologyAction.py
@@ -6,7 +6,7 @@
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
-from grid2op.Action.PlayableAction import PlayableAction
+from grid2op.Action.playableAction import PlayableAction
class TopologyAction(PlayableAction):
diff --git a/grid2op/Action/TopologyAndDispatchAction.py b/grid2op/Action/topologyAndDispatchAction.py
similarity index 95%
rename from grid2op/Action/TopologyAndDispatchAction.py
rename to grid2op/Action/topologyAndDispatchAction.py
index b167fded1..b85443724 100644
--- a/grid2op/Action/TopologyAndDispatchAction.py
+++ b/grid2op/Action/topologyAndDispatchAction.py
@@ -6,7 +6,7 @@
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
-from grid2op.Action.PlayableAction import PlayableAction
+from grid2op.Action.playableAction import PlayableAction
class TopologyAndDispatchAction(PlayableAction):
diff --git a/grid2op/Action/TopologyChangeAction.py b/grid2op/Action/topologyChangeAction.py
similarity index 94%
rename from grid2op/Action/TopologyChangeAction.py
rename to grid2op/Action/topologyChangeAction.py
index eaea3e3a7..c8ede25a2 100644
--- a/grid2op/Action/TopologyChangeAction.py
+++ b/grid2op/Action/topologyChangeAction.py
@@ -6,7 +6,7 @@
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
-from grid2op.Action.PlayableAction import PlayableAction
+from grid2op.Action.playableAction import PlayableAction
class TopologyChangeAction(PlayableAction):
diff --git a/grid2op/Action/TopologyChangeAndDispatchAction.py b/grid2op/Action/topologyChangeAndDispatchAction.py
similarity index 95%
rename from grid2op/Action/TopologyChangeAndDispatchAction.py
rename to grid2op/Action/topologyChangeAndDispatchAction.py
index 4ec7e5148..11947f262 100644
--- a/grid2op/Action/TopologyChangeAndDispatchAction.py
+++ b/grid2op/Action/topologyChangeAndDispatchAction.py
@@ -6,7 +6,7 @@
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
-from grid2op.Action.PlayableAction import PlayableAction
+from grid2op.Action.playableAction import PlayableAction
class TopologyChangeAndDispatchAction(PlayableAction):
diff --git a/grid2op/Action/TopologySetAction.py b/grid2op/Action/topologySetAction.py
similarity index 94%
rename from grid2op/Action/TopologySetAction.py
rename to grid2op/Action/topologySetAction.py
index 533d4ca0b..204109694 100644
--- a/grid2op/Action/TopologySetAction.py
+++ b/grid2op/Action/topologySetAction.py
@@ -6,7 +6,7 @@
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
-from grid2op.Action.PlayableAction import PlayableAction
+from grid2op.Action.playableAction import PlayableAction
class TopologySetAction(PlayableAction):
diff --git a/grid2op/Action/TopologySetAndDispatchAction.py b/grid2op/Action/topologySetAndDispatchAction.py
similarity index 95%
rename from grid2op/Action/TopologySetAndDispatchAction.py
rename to grid2op/Action/topologySetAndDispatchAction.py
index a8b36cf48..dee7d797a 100644
--- a/grid2op/Action/TopologySetAndDispatchAction.py
+++ b/grid2op/Action/topologySetAndDispatchAction.py
@@ -6,7 +6,7 @@
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
-from grid2op.Action.PlayableAction import PlayableAction
+from grid2op.Action.playableAction import PlayableAction
class TopologySetAndDispatchAction(PlayableAction):
diff --git a/grid2op/Action/VoltageOnlyAction.py b/grid2op/Action/voltageOnlyAction.py
similarity index 98%
rename from grid2op/Action/VoltageOnlyAction.py
rename to grid2op/Action/voltageOnlyAction.py
index 5ccc91b69..637d87871 100644
--- a/grid2op/Action/VoltageOnlyAction.py
+++ b/grid2op/Action/voltageOnlyAction.py
@@ -9,7 +9,7 @@
import warnings
from grid2op.Exceptions import AmbiguousAction
-from grid2op.Action.BaseAction import BaseAction
+from grid2op.Action.baseAction import BaseAction
class VoltageOnlyAction(BaseAction):
diff --git a/grid2op/Agent/alertAgent.py b/grid2op/Agent/alertAgent.py
new file mode 100644
index 000000000..012995722
--- /dev/null
+++ b/grid2op/Agent/alertAgent.py
@@ -0,0 +1,87 @@
+# Copyright (c) 2023, RTE (https://www.rte-france.com)
+# See AUTHORS.txt
+# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
+# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
+# you can obtain one at http://mozilla.org/MPL/2.0/.
+# SPDX-License-Identifier: MPL-2.0
+# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
+
+import numpy as np
+from grid2op.Action import BaseAction
+from grid2op.Agent.recoPowerlineAgent import RecoPowerlineAgent
+from grid2op.Agent.baseAgent import BaseAgent
+from grid2op.Observation import BaseObservation
+from grid2op.dtypes import dt_int
+
+
+class AlertAgent(BaseAgent):
+ """
+ This is an :class:`AlertAgent` example, which attempts to reconnect powerlines and to send alerts on the worst possible attacks: for each disconnected powerline
+ that can be reconnected, it simulates the effect of reconnecting it and reconnects the one that leads to the
+ highest simulated reward. It also simulates the effect of a line disconnection on each attackable line and raises alerts for the worst ones.
+
+ """
+
+ def __init__(self,
+ action_space,
+ grid_controler=RecoPowerlineAgent,
+ percentage_alert=30,
+ simu_step=1,
+ threshold=0.99):
+ super().__init__(action_space)
+ if isinstance(grid_controler, type):
+ self.grid_controler = grid_controler(action_space)
+ else:
+ self.grid_controler = grid_controler
+
+ self.percentage_alert = percentage_alert
+ self.simu_step = simu_step
+ self.threshold = threshold # if the max flow after a line disconnection is below threshold, then the alert is not raised
+
+ # store the result of the simulation of powerline disconnection
+ self.alertable_line_ids = type(action_space).alertable_line_ids
+ self.n_alertable_lines = len(self.alertable_line_ids)
+ self.nb_overloads = np.zeros(self.n_alertable_lines, dtype=dt_int)
+ self.rho_max_N_1 = np.zeros(self.n_alertable_lines)
+ self.N_1_actions = [self.action_space({"set_line_status": [(id_, -1)]}) for id_ in self.alertable_line_ids]
+ self._first_k = np.zeros(self.n_alertable_lines, dtype=bool)
+ self._first_k[:int(self.percentage_alert / 100. * self.n_alertable_lines)] = True
+
+ def act(self, observation: BaseObservation, reward: float, done: bool = False) -> BaseAction:
+ action = self.grid_controler.act(observation, reward, done)
+
+ self.nb_overloads[:] = 0
+ self.rho_max_N_1[:] = 0.
+
+ # simulate, on top of the controller's action, the disconnection of each attackable line
+ for i, tmp_act in enumerate(self.N_1_actions):
+ # only simulate if the line is connected
+ if observation.line_status[self.alertable_line_ids[i]]:
+ action_to_simulate = tmp_act
+ action_to_simulate += action
+ action_to_simulate.remove_line_status_from_topo(observation)
+ (
+ simul_obs,
+ simul_reward,
+ simul_done,
+ simul_info,
+ ) = observation.simulate(action_to_simulate, time_step=self.simu_step)
+
+ rho_simu = simul_obs.rho
+ if not simul_done:
+ self.nb_overloads[i] = (rho_simu >= 1).sum()
+ self.rho_max_N_1[i] = (rho_simu).max()
+ else:
+ self.nb_overloads[i] = type(observation).n_line
+ self.rho_max_N_1[i] = 5.
+
+ # sort the index by nb_overloads and, if nb_overloads is equal, sort by rho_max
+ ind = (self.nb_overloads * 1000. + self.rho_max_N_1).argsort()
+ ind = ind[::-1]
+
+ # send alerts when the powerline is among the top k (not to send too many alerts) and
+ # the max rho after the powerline disconnection is too high (above threshold)
+ indices_to_keep = ind[self._first_k & (self.rho_max_N_1[ind] >= self.threshold)]
+ action.raise_alert = [i for i in indices_to_keep]
+
+ return action
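A hedged usage sketch for this new agent. It assumes an alert-capable environment (one whose action space defines `alertable_line_ids`); "l2rpn_idf_2023" is used here only because it supports alerts, and the inner grid controller defaults to `RecoPowerlineAgent` as above:

```python
import grid2op
from grid2op.Agent.alertAgent import AlertAgent

env = grid2op.make("l2rpn_idf_2023", test=True)
agent = AlertAgent(env.action_space, percentage_alert=30, simu_step=1)

obs = env.reset()
reward, done = 0.0, False
for _ in range(10):  # short demo rollout
    act = agent.act(obs, reward, done)  # reconnects lines and raises alerts
    obs, reward, done, info = env.step(act)
    if done:
        break
```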
diff --git a/grid2op/Agent/recoPowerLinePerArea.py b/grid2op/Agent/recoPowerLinePerArea.py
index 78c114ef9..31c79d047 100644
--- a/grid2op/Agent/recoPowerLinePerArea.py
+++ b/grid2op/Agent/recoPowerLinePerArea.py
@@ -44,7 +44,7 @@ def __init__(self, action_space: ActionSpace, areas_by_sub_id: dict):
for line_id, subor_id in enumerate(type(action_space).line_or_to_subid):
if subor_id in sub_this_area:
self.lines_to_area_id[line_id] = aread_id
- if np.any(self.lines_to_area_id == -1):
+ if (self.lines_to_area_id == -1).any():
raise AgentError("some powerline have no area id")
self.nb_area = len(areas_by_sub_id)
@@ -52,7 +52,7 @@ def act(self, observation: BaseObservation, reward: float, done : bool=False):
line_stat_s = observation.line_status
cooldown = observation.time_before_cooldown_line
can_be_reco = ~line_stat_s & (cooldown == 0)
- if not np.any(can_be_reco):
+ if not can_be_reco.any():
# no line to reconnect
return self.action_space()
area_used = np.full(self.nb_area, fill_value=False, dtype=bool)
diff --git a/grid2op/Agent/recoPowerlineAgent.py b/grid2op/Agent/recoPowerlineAgent.py
index 0272b5928..b4373f9bd 100644
--- a/grid2op/Agent/recoPowerlineAgent.py
+++ b/grid2op/Agent/recoPowerlineAgent.py
@@ -25,7 +25,7 @@ def _get_tested_action(self, observation):
line_stat_s = observation.line_status
cooldown = observation.time_before_cooldown_line
can_be_reco = ~line_stat_s & (cooldown == 0)
- if np.any(can_be_reco):
+ if can_be_reco.any():
res = [
self.action_space({"set_line_status": [(id_, +1)]})
for id_ in np.where(can_be_reco)[0]
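Both agents above share the same reconnection mask: a line is a candidate if and only if it is currently disconnected and its cooldown has expired. A minimal illustration with made-up values:

```python
import numpy as np

line_status = np.array([True, False, False, True])  # obs.line_status
cooldown    = np.array([0,    0,     3,     0])     # obs.time_before_cooldown_line

can_be_reco = ~line_status & (cooldown == 0)
print(np.where(can_be_reco)[0])  # [1] -> only line 1 can be reconnected now
```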
diff --git a/grid2op/Backend/__init__.py b/grid2op/Backend/__init__.py
index 4cde4a3d4..8b681e473 100644
--- a/grid2op/Backend/__init__.py
+++ b/grid2op/Backend/__init__.py
@@ -1,4 +1,4 @@
__all__ = ["Backend", "PandaPowerBackend"]
-from grid2op.Backend.Backend import Backend
-from grid2op.Backend.PandaPowerBackend import PandaPowerBackend
+from grid2op.Backend.backend import Backend
+from grid2op.Backend.pandaPowerBackend import PandaPowerBackend
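Because the package `__init__` above re-exports the classes, the package-level import is the rename-proof spelling; only imports that target the module file directly need the new lowercase names:

```python
# stable across the 1.9.2 rename: import from the package
from grid2op.Backend import Backend, PandaPowerBackend

# direct module imports must switch to the lowercase file names:
# before 1.9.2: from grid2op.Backend.PandaPowerBackend import PandaPowerBackend
from grid2op.Backend.pandaPowerBackend import PandaPowerBackend
```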
diff --git a/grid2op/Backend/Backend.py b/grid2op/Backend/backend.py
similarity index 99%
rename from grid2op/Backend/Backend.py
rename to grid2op/Backend/backend.py
index c9e91ea5c..15e155e36 100644
--- a/grid2op/Backend/Backend.py
+++ b/grid2op/Backend/backend.py
@@ -57,7 +57,7 @@ class Backend(GridObjects, ABC):
It is NOT recommended to use this class outside the Environment.
- An example of a valid backend is provided in the :class:`PandapowerBackend`.
+ An example of a valid backend is provided in the :class:`PandaPowerBackend`.
All the abstract methods (that need to be implemented for a backend to work properly) are (more information given
in the :ref:`create-backend-module` page):
@@ -622,7 +622,7 @@ def set_thermal_limit(self, limits):
def update_thermal_limit_from_vect(self, thermal_limit_a):
"""You can use it if your backend stores the thermal limits
- of the grid in a vector (see PandaPowerBackend for example)
+ of the grid in a vector (see :class:`PandaPowerBackend` for example)
.. warning::
This is not called by the environment and cannot be used to
@@ -945,7 +945,7 @@ def next_grid_state(self, env, is_dc=False):
] = True
# disconnect the current power lines
- if np.sum(to_disc[lines_status]) == 0:
+ if to_disc[lines_status].sum() == 0:
# no powerlines have been disconnected at this time step, i stop the computation there
break
disconnected_during_cf[to_disc] = ts
@@ -1653,7 +1653,7 @@ def get_action_to_set(self):
if self.shunts_data_available:
p_s, q_s, sh_v, bus_s = self.shunt_info()
dict_["shunt"] = {"shunt_bus": bus_s}
- if np.sum(bus_s >= 1):
+ if (bus_s >= 1).sum():
p_s *= (self._sh_vnkv / sh_v) ** 2
q_s *= (self._sh_vnkv / sh_v) ** 2
p_s[bus_s == -1] = np.NaN
@@ -1722,7 +1722,7 @@ def update_from_obs(self, obs, force_update=False):
dict_["shunt"] = {"shunt_bus": obs._shunt_bus}
shunt_co = obs._shunt_bus >= 1
- if np.sum(shunt_co):
+ if shunt_co.any():
mults = (self._sh_vnkv / obs._shunt_v) ** 2
sh_p = obs._shunt_p * mults
sh_q = obs._shunt_q * mults
@@ -1744,7 +1744,7 @@ def assert_grid_correct(self):
"""
# lazy loading
from grid2op.Action import CompleteAction
- from grid2op.Action._BackendAction import _BackendAction
+ from grid2op.Action._backendAction import _BackendAction
orig_type = type(self)
if orig_type.my_bk_act_class is None:
@@ -1791,22 +1791,22 @@ def assert_grid_correct_after_powerflow(self):
tmp = self.get_line_status()
if tmp.shape[0] != self.n_line:
raise IncorrectNumberOfLines('returned by "backend.get_line_status()"')
- if np.any(~np.isfinite(tmp)):
+ if (~np.isfinite(tmp)).any():
raise EnvironmentError(type(self).ERR_INIT_POWERFLOW)
tmp = self.get_line_flow()
if tmp.shape[0] != self.n_line:
raise IncorrectNumberOfLines('returned by "backend.get_line_flow()"')
- if np.any(~np.isfinite(tmp)):
+ if (~np.isfinite(tmp)).any():
raise EnvironmentError(type(self).ERR_INIT_POWERFLOW)
tmp = self.get_thermal_limit()
if tmp.shape[0] != self.n_line:
raise IncorrectNumberOfLines('returned by "backend.get_thermal_limit()"')
- if np.any(~np.isfinite(tmp)):
+ if (~np.isfinite(tmp)).any():
raise EnvironmentError(type(self).ERR_INIT_POWERFLOW)
tmp = self.get_line_overflow()
if tmp.shape[0] != self.n_line:
raise IncorrectNumberOfLines('returned by "backend.get_line_overflow()"')
- if np.any(~np.isfinite(tmp)):
+ if (~np.isfinite(tmp)).any():
raise EnvironmentError(type(self).ERR_INIT_POWERFLOW)
tmp = self.generators_info()
@@ -1857,10 +1857,10 @@ def assert_grid_correct_after_powerflow(self):
)
tmp = self.get_topo_vect()
- if tmp.shape[0] != np.sum(self.sub_info):
+ if tmp.shape[0] != self.sub_info.sum():
raise IncorrectNumberOfElements('returned by "backend.get_topo_vect()"')
- if np.any(~np.isfinite(tmp)):
+ if (~np.isfinite(tmp)).any():
raise EnvError(
'Some components of "backend.get_topo_vect()" are not finite. They should be integers.'
)
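The `update_from_obs` hunk above rescales shunt injections with `(self._sh_vnkv / obs._shunt_v) ** 2`: a shunt's admittance is fixed, so its P and Q scale with the square of voltage, and values observed at `sh_v` must be referred back to their nominal-voltage equivalents. A small numeric sketch with illustrative values:

```python
import numpy as np

sh_vnkv = np.array([20.0])  # nominal shunt voltage (kV)
sh_v    = np.array([19.0])  # observed voltage (kV)
p_obs   = np.array([5.0])   # MW at the observed voltage

# P scales with V^2 for a fixed admittance
p_nominal = p_obs * (sh_vnkv / sh_v) ** 2
print(p_nominal)  # ~[5.54] MW referred to nominal voltage
```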
diff --git a/grid2op/Backend/EducPandaPowerBackend.py b/grid2op/Backend/educPandaPowerBackend.py
similarity index 99%
rename from grid2op/Backend/EducPandaPowerBackend.py
rename to grid2op/Backend/educPandaPowerBackend.py
index a568bd1d5..effbaa67c 100644
--- a/grid2op/Backend/EducPandaPowerBackend.py
+++ b/grid2op/Backend/educPandaPowerBackend.py
@@ -18,7 +18,7 @@
import scipy
from grid2op.dtypes import dt_int, dt_float, dt_bool
-from grid2op.Backend.Backend import Backend
+from grid2op.Backend.backend import Backend
from grid2op.Exceptions import *
diff --git a/grid2op/Backend/PandaPowerBackend.py b/grid2op/Backend/pandaPowerBackend.py
similarity index 99%
rename from grid2op/Backend/PandaPowerBackend.py
rename to grid2op/Backend/pandaPowerBackend.py
index 716e7c039..f532115d5 100644
--- a/grid2op/Backend/PandaPowerBackend.py
+++ b/grid2op/Backend/pandaPowerBackend.py
@@ -18,7 +18,7 @@
import scipy
from grid2op.dtypes import dt_int, dt_float, dt_bool
-from grid2op.Backend.Backend import Backend
+from grid2op.Backend.backend import Backend
from grid2op.Action import BaseAction
from grid2op.Exceptions import *
@@ -276,7 +276,7 @@ def get_nb_active_bus(self):
res: :class:`int`
The total number of active buses.
"""
- return np.sum(self._grid.bus["in_service"])
+ return self._grid.bus["in_service"].sum()
@staticmethod
def _load_grid_load_p_mw(grid):
@@ -641,7 +641,7 @@ def _init_private_attrs(self):
self._what_object_where[sub_id].append(("storage", "bus", i))
- self.dim_topo = np.sum(self.sub_info)
+ self.dim_topo = self.sub_info.sum()
self._compute_pos_big_topo()
# utilities for implementing apply_action
@@ -823,11 +823,11 @@ def apply_action(self, backendAction=None):
bus_is[i + self.__nb_bus_before] = bus2_status
tmp_prod_p = self._get_vector_inj["prod_p"](self._grid)
- if np.any(prod_p.changed):
+ if (prod_p.changed).any():
tmp_prod_p.iloc[prod_p.changed] = prod_p.values[prod_p.changed]
tmp_prod_v = self._get_vector_inj["prod_v"](self._grid)
- if np.any(prod_v.changed):
+ if (prod_v.changed).any():
tmp_prod_v.iloc[prod_v.changed] = (
prod_v.values[prod_v.changed] / self.prod_pu_to_kv[prod_v.changed]
)
@@ -837,17 +837,17 @@ def apply_action(self, backendAction=None):
self._grid["ext_grid"]["vm_pu"] = 1.0 * tmp_prod_v[self._id_bus_added]
tmp_load_p = self._get_vector_inj["load_p"](self._grid)
- if np.any(load_p.changed):
+ if (load_p.changed).any():
tmp_load_p.iloc[load_p.changed] = load_p.values[load_p.changed]
tmp_load_q = self._get_vector_inj["load_q"](self._grid)
- if np.any(load_q.changed):
+ if (load_q.changed).any():
tmp_load_q.iloc[load_q.changed] = load_q.values[load_q.changed]
if self.n_storage > 0:
# active setpoint
tmp_stor_p = self._grid.storage["p_mw"]
- if np.any(storage.changed):
+ if (storage.changed).any():
tmp_stor_p.iloc[storage.changed] = storage.values[storage.changed]
# topology of the storage
@@ -870,15 +870,15 @@ def apply_action(self, backendAction=None):
if type(backendAction).shunts_data_available:
shunt_p, shunt_q, shunt_bus = shunts__
- if np.any(shunt_p.changed):
+ if (shunt_p.changed).any():
self._grid.shunt["p_mw"].iloc[shunt_p.changed] = shunt_p.values[
shunt_p.changed
]
- if np.any(shunt_q.changed):
+ if (shunt_q.changed).any():
self._grid.shunt["q_mvar"].iloc[shunt_q.changed] = shunt_q.values[
shunt_q.changed
]
- if np.any(shunt_bus.changed):
+ if (shunt_bus.changed).any():
sh_service = shunt_bus.values[shunt_bus.changed] != -1
self._grid.shunt["in_service"].iloc[shunt_bus.changed] = sh_service
chg_and_in_service = sh_service & shunt_bus.changed
@@ -1010,14 +1010,14 @@ def runpf(self, is_dc=False):
else:
self._pf_init = "auto"
- if np.any(~self._grid.load["in_service"]):
+ if (~self._grid.load["in_service"]).any():
# TODO see if there is a better way here -> do not handle this here, but rather in Backend._next_grid_state
raise pp.powerflow.LoadflowNotConverged("Disconnected load: for now grid2op cannot handle properly"
" disconnected load. If you want to disconnect one, say it"
" consumes 0. instead. Please check loads: "
f"{np.where(~self._grid.load['in_service'])[0]}"
)
- if np.any(~self._grid.gen["in_service"]):
+ if (~self._grid.gen["in_service"]).any():
# TODO see if there is a better way here -> do not handle this here, but rather in Backend._next_grid_state
raise pp.powerflow.LoadflowNotConverged("Disconnected gen: for now grid2op cannot handle properly"
" disconnected generators. If you want to disconnect one, say it"
@@ -1131,7 +1131,7 @@ def runpf(self, is_dc=False):
self.storage_theta[:],
) = self._storages_info()
deact_storage = ~np.isfinite(self.storage_v)
- if np.any(np.abs(self.storage_p[deact_storage]) > self.tol):
+ if (np.abs(self.storage_p[deact_storage]) > self.tol).any():
raise pp.powerflow.LoadflowNotConverged(
"Isolated storage set to absorb / produce something"
)
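Every `apply_action` hunk above follows one pattern: an injection arrives as a store exposing a boolean `changed` mask plus a `values` array, and only the flagged entries are written into the corresponding pandapower table. A standalone sketch of that pattern; `FakeValueStore` is illustrative, not grid2op's actual `ValueStore`:

```python
import numpy as np
import pandas as pd

class FakeValueStore:  # stand-in for grid2op's internal value store
    def __init__(self, values, changed):
        self.values = np.asarray(values)
        self.changed = np.asarray(changed, dtype=bool)

table = pd.Series([10.0, 20.0, 30.0])  # e.g. self._grid.load["p_mw"]
load_p = FakeValueStore([0.0, 25.0, 0.0], [False, True, False])

if load_p.changed.any():  # cheap guard, method form as in the diff
    table.iloc[load_p.changed] = load_p.values[load_p.changed]
print(table.tolist())  # [10.0, 25.0, 30.0] -> only the changed entry moved
```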
diff --git a/grid2op/Chronics/GSFFWFWM.py b/grid2op/Chronics/GSFFWFWM.py
index 6c9ff44dc..fc09e16e3 100644
--- a/grid2op/Chronics/GSFFWFWM.py
+++ b/grid2op/Chronics/GSFFWFWM.py
@@ -181,7 +181,7 @@ def _generate_matenance_static(name_line,
idx_line_maintenance = np.array(
[el in line_to_maintenance for el in columnsNames]
)
- nb_line_maint = np.sum(idx_line_maintenance)
+ nb_line_maint = idx_line_maintenance.sum()
if nb_line_maint == 0:
# TODO log something there !
return res
@@ -240,7 +240,7 @@ def _generate_matenance_static(name_line,
size=n_lines_maintenance,
)
- n_Generated_Maintenance = np.sum(are_lines_in_maintenance)
+ n_Generated_Maintenance = are_lines_in_maintenance.sum()
# check if the number of maintenance is not above the max allowed. otherwise randomly pick up the right
# number
if n_Generated_Maintenance > maxDailyMaintenance:
diff --git a/grid2op/Chronics/multiFolder.py b/grid2op/Chronics/multiFolder.py
index a3ebc4f68..6be75bf3c 100644
--- a/grid2op/Chronics/multiFolder.py
+++ b/grid2op/Chronics/multiFolder.py
@@ -12,8 +12,8 @@
import numpy as np
from datetime import timedelta, datetime
-from grid2op.dtypes import dt_int
-from grid2op.Exceptions import *
+from grid2op.dtypes import dt_int, dt_float
+from grid2op.Exceptions import ChronicsNotFoundError, ChronicsError
from grid2op.Chronics.gridValue import GridValue
from grid2op.Chronics.gridStateFromFile import GridStateFromFile
@@ -211,7 +211,7 @@ def init_subpath(self):
self.subpaths = [
os.path.join(self.path, el)
for el in os.listdir(self.path)
- if os.path.isdir(os.path.join(self.path, el))
+ if os.path.isdir(os.path.join(self.path, el)) and (el != "__pycache__") and (not el.startswith("."))
]
self.subpaths.sort()
self.subpaths = np.array(self.subpaths)
@@ -339,9 +339,17 @@ def sample_next_chronics(self, probabilities=None):
self._prev_cache_id = -1
if probabilities is None:
probabilities = np.ones(self._order.shape[0])
-
+ try:
+ probabilities = np.array(probabilities, dtype=dt_float)
+ except Exception as exc_:
+ raise ChronicsError("Impossible to convert the probablities given to an array of float") from exc_
+
+ sum_prob = probabilities.sum()
+ if abs(sum_prob) <= 1e-5:
+ raise ChronicsError("Impossible to use the given probabilities argument, it sums to 0. (or close enough to it)")
+
# make sure it sums to 1
- probabilities /= np.sum(probabilities)
+ probabilities /= sum_prob
# take one at "random" among these
selected = self.space_prng.choice(self._order, p=probabilities)
id_sel = np.where(self._order == selected)[0]
@@ -377,7 +385,7 @@ def reset(self):
self._order.append(i)
if len(self._order) == 0:
- raise RuntimeError(
+ raise ChronicsError(
'Impossible to initialize the Multifolder. Your "filter_fun" filters out all the '
"possible scenarios."
)
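Note: with the hunk above, `sample_next_chronics` now casts the caller's weights to floats, rejects vectors that sum
to (nearly) zero with a `ChronicsError`, and normalizes the rest. A usage sketch (environment name and attribute path
illustrative, assuming no scenario filtering):

    import numpy as np
    import grid2op

    env = grid2op.make("l2rpn_case14_sandbox")
    multifolder = env.chronics_handler.real_data

    # one weight per scenario folder; no need to pre-normalize
    weights = np.ones(len(multifolder.subpaths))
    weights[0] = 10.0  # make the first scenario 10x more likely
    multifolder.sample_next_chronics(weights)

    # an all-zero weight vector now raises ChronicsError instead of dividing by zero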
diff --git a/grid2op/Converter/AnalogStateConverter.py b/grid2op/Converter/AnalogStateConverter.py
index d03507344..f3dbc1a8b 100644
--- a/grid2op/Converter/AnalogStateConverter.py
+++ b/grid2op/Converter/AnalogStateConverter.py
@@ -230,7 +230,7 @@ def size_obs(obs):
obs.n_line * 5,
]
)
- return np.sum(dims)
+ return dims.sum()
@staticmethod
def netbus_to_act_setbus(obs, net_bus):
@@ -286,7 +286,7 @@ def netbus_rnd(obs, n_bus=2):
# Pick the elements to change at random
rnd_sub_elems = np.random.randint(0, n_elem, rnd_n_changes)
# Set the topo vect
- sub_topo_pos = np.sum(obs.sub_info[0:rnd_sub])
+ sub_topo_pos = obs.sub_info[0:rnd_sub].sum()
for elem_pos in rnd_sub_elems:
rnd_bus = np.random.randint(n_bus)
rnd_topo[rnd_bus][sub_topo_pos + elem_pos] = 1.0
diff --git a/grid2op/Converter/BackendConverter.py b/grid2op/Converter/BackendConverter.py
index 141843889..f3ca3868d 100644
--- a/grid2op/Converter/BackendConverter.py
+++ b/grid2op/Converter/BackendConverter.py
@@ -487,7 +487,7 @@ def assert_grid_correct(self):
assert np.all(sorted(self._topo_sr2tg[self._topo_tg2sr]) == np.arange(self.dim_topo))
topo_sr2tg_without_storage = self._topo_sr2tg[self._topo_sr2tg >= 0]
- assert np.sum(self._topo_sr2tg == -1) == tg_cls.n_storage
+ assert (self._topo_sr2tg == -1).sum() == tg_cls.n_storage
assert np.all(self._topo_tg2sr[topo_sr2tg_without_storage] >= 0)
target_without_storage = np.array([i for i in range(tg_cls.dim_topo)
if not i in tg_cls.storage_pos_topo_vect])
diff --git a/grid2op/Converter/ConnectivityConverter.py b/grid2op/Converter/ConnectivityConverter.py
index 4a8640169..7aaaa15ea 100644
--- a/grid2op/Converter/ConnectivityConverter.py
+++ b/grid2op/Converter/ConnectivityConverter.py
@@ -378,7 +378,7 @@ def convert_act(self, encoded_act, explore=None):
raise RuntimeError(
f"Invalid encoded_act shape provided it should be {self.n}"
)
- if np.any((encoded_act < -1.0) | (encoded_act > 1.0)):
+ if ((encoded_act < -1.0) | (encoded_act > 1.0)).any():
errors = (encoded_act < -1.0) | (encoded_act > 1.0)
indexes = np.where(errors)[0]
raise RuntimeError(
@@ -503,7 +503,7 @@ def _compute_disagreement(self, encoded_act, topo_vect):
)
# for the elements that are not affected by the action (i don't know where they will be: maximum penalty)
not_set = np.full(
- np.sum(((bus_el1 == 0) | (bus_el2 == 0)) & set_component),
+ (((bus_el1 == 0) | (bus_el2 == 0)) & set_component).sum(),
fill_value=2,
dtype=dt_int,
)
diff --git a/grid2op/Environment/__init__.py b/grid2op/Environment/__init__.py
index a171bf117..1375aad0a 100644
--- a/grid2op/Environment/__init__.py
+++ b/grid2op/Environment/__init__.py
@@ -8,10 +8,10 @@
"TimedOutEnvironment"
]
-from grid2op.Environment.BaseEnv import BaseEnv
-from grid2op.Environment.Environment import Environment
-from grid2op.Environment.BaseMultiProcessEnv import BaseMultiProcessEnvironment
-from grid2op.Environment.SingleEnvMultiProcess import SingleEnvMultiProcess
-from grid2op.Environment.MultiEnvMultiProcess import MultiEnvMultiProcess
-from grid2op.Environment.MultiMixEnv import MultiMixEnvironment
+from grid2op.Environment.baseEnv import BaseEnv
+from grid2op.Environment.environment import Environment
+from grid2op.Environment.baseMultiProcessEnv import BaseMultiProcessEnvironment
+from grid2op.Environment.singleEnvMultiProcess import SingleEnvMultiProcess
+from grid2op.Environment.multiEnvMultiProcess import MultiEnvMultiProcess
+from grid2op.Environment.multiMixEnv import MultiMixEnvironment
from grid2op.Environment.timedOutEnv import TimedOutEnvironment
diff --git a/grid2op/Environment/_forecast_env.py b/grid2op/Environment/_forecast_env.py
index 8223ee067..ad08fc7df 100644
--- a/grid2op/Environment/_forecast_env.py
+++ b/grid2op/Environment/_forecast_env.py
@@ -9,7 +9,7 @@
from typing import Tuple
from grid2op.Action import BaseAction
from grid2op.Observation import BaseObservation
-from grid2op.Environment.Environment import Environment
+from grid2op.Environment.environment import Environment
class _ForecastEnv(Environment):
diff --git a/grid2op/Environment/_ObsEnv.py b/grid2op/Environment/_obsEnv.py
similarity index 98%
rename from grid2op/Environment/_ObsEnv.py
rename to grid2op/Environment/_obsEnv.py
index 0fef691a4..8db0060d4 100644
--- a/grid2op/Environment/_ObsEnv.py
+++ b/grid2op/Environment/_obsEnv.py
@@ -12,7 +12,7 @@
from grid2op.Exceptions.EnvExceptions import EnvError
from grid2op.dtypes import dt_int, dt_float, dt_bool
-from grid2op.Environment.BaseEnv import BaseEnv
+from grid2op.Environment.baseEnv import BaseEnv
from grid2op.Chronics import ChangeNothing
from grid2op.Rules import RulesChecker
from grid2op.operator_attention import LinearAttentionBudget
@@ -94,6 +94,7 @@ def __init__(
self._reward_helper = reward_helper
self._helper_action_class = helper_action_class
+ # TODO init reward and other reward
# initialize the observation space
self._obsClass = None
@@ -326,7 +327,7 @@ def init(
reconnected,
first_ts_maintenance,
) = self._update_vector_with_timestep(time_step, is_overflow)
- if np.any(first_ts_maintenance):
+ if first_ts_maintenance.any():
set_status = np.array(self._line_status_me, dtype=dt_int)
set_status[first_ts_maintenance] = -1
topo_vect = np.array(self._topo_vect, dtype=dt_int)
@@ -336,7 +337,7 @@ def init(
set_status = self._line_status_me
topo_vect = self._topo_vect
- if np.any(still_in_maintenance):
+ if still_in_maintenance.any():
set_status[still_in_maintenance] = -1
topo_vect = np.array(self._topo_vect, dtype=dt_int)
topo_vect[self.line_or_pos_topo_vect[still_in_maintenance]] = -1
@@ -450,7 +451,7 @@ def simulate(self, action):
obs, reward, done, info = self.step(action)
return obs, reward, done, info
- def get_obs(self, _update_state=True):
+ def get_obs(self, _update_state=True, _do_copy=True):
"""
INTERNAL
@@ -468,7 +469,11 @@ def get_obs(self, _update_state=True):
"environment that cannot be copied.")
if _update_state:
self.current_obs.update(self, with_forecast=False)
- res = self.current_obs.copy()
+
+ if _do_copy:
+ res = copy.deepcopy(self.current_obs)
+ else:
+ res = self.current_obs
return res
def update_grid(self, env):
diff --git a/grid2op/Environment/BaseEnv.py b/grid2op/Environment/baseEnv.py
similarity index 98%
rename from grid2op/Environment/BaseEnv.py
rename to grid2op/Environment/baseEnv.py
index f86f90cd4..a25a90e73 100644
--- a/grid2op/Environment/BaseEnv.py
+++ b/grid2op/Environment/baseEnv.py
@@ -16,13 +16,12 @@
from typing import Optional, Tuple
import warnings
import numpy as np
-from scipy.optimize import minimize
-from scipy.optimize import LinearConstraint
+from scipy.optimize import (minimize, LinearConstraint)
from abc import ABC, abstractmethod
-from grid2op.Action.ActionSpace import ActionSpace
-from grid2op.Observation.baseObservation import BaseObservation
-from grid2op.Observation.observationSpace import ObservationSpace
-from grid2op.Observation.highresSimCounter import HighResSimCounter
+from grid2op.Action import ActionSpace
+from grid2op.Observation import (BaseObservation,
+ ObservationSpace,
+ HighResSimCounter)
from grid2op.Backend import Backend
from grid2op.dtypes import dt_int, dt_float, dt_bool
from grid2op.Space import GridObjects, RandomObject
@@ -35,7 +34,7 @@
from grid2op.Rules import AlwaysLegal
from grid2op.Opponent import BaseOpponent
from grid2op.operator_attention import LinearAttentionBudget
-from grid2op.Action._BackendAction import _BackendAction
+from grid2op.Action._backendAction import _BackendAction
from grid2op.Chronics import ChronicsHandler
from grid2op.Rules import AlwaysLegal, BaseRules
@@ -108,7 +107,7 @@ class BaseEnv(GridObjects, RandomObject, ABC):
The current observation (or None if it's not initialized)
backend: :class:`grid2op.Backend.Backend`
- The backend used to compute the powerflows and cascading failures.
+ The backend used to compute the powerflows.
done: ``bool``
Whether the environment is "done". If ``True`` you need to call :func:`Environment.reset` in order
@@ -1674,7 +1673,7 @@ def set_thermal_limit(self, thermal_limit):
"Attempt to set thermal limit on {} powerlines while there are {}"
"on the grid".format(tmp.shape[0], self.n_line)
)
- if np.any(~np.isfinite(tmp)):
+ if (~np.isfinite(tmp)).any():
raise Grid2OpException(
"Impossible to use non finite value for thermal limits."
)
@@ -1748,7 +1747,7 @@ def _prepare_redisp(self, action, new_p, already_modified_gen):
):
return valid, except_, info_
# check that everything is consistent with pmin, pmax:
- if np.any(self._target_dispatch > self.gen_pmax - self.gen_pmin):
+ if (self._target_dispatch > self.gen_pmax - self.gen_pmin).any():
# action is invalid, the target redispatching would be above pmax for at least a generator
cond_invalid = self._target_dispatch > self.gen_pmax - self.gen_pmin
except_ = InvalidRedispatching(
@@ -1760,7 +1759,7 @@ def _prepare_redisp(self, action, new_p, already_modified_gen):
)
self._target_dispatch -= redisp_act_orig
return valid, except_, info_
- if np.any(self._target_dispatch < self.gen_pmin - self.gen_pmax):
+ if (self._target_dispatch < self.gen_pmin - self.gen_pmax).any():
# action is invalid, the target redispatching would be below pmin for at least a generator
cond_invalid = self._target_dispatch < self.gen_pmin - self.gen_pmax
except_ = InvalidRedispatching(
@@ -1774,7 +1773,7 @@ def _prepare_redisp(self, action, new_p, already_modified_gen):
return valid, except_, info_
# i can't redispatch turned off generators [turned off generators need to be turned on before redispatching]
- if np.any(redisp_act_orig[new_p == 0.0]) and self._forbid_dispatch_off:
+ if (redisp_act_orig[new_p == 0.0]).any() and self._forbid_dispatch_off:
# action is invalid, a generator has been redispatched, but it's turned off
except_ = InvalidRedispatching(
"Impossible to dispatch a turned off generator"
@@ -1785,7 +1784,7 @@ def _prepare_redisp(self, action, new_p, already_modified_gen):
if self._forbid_dispatch_off is True:
redisp_act_orig_cut = 1.0 * redisp_act_orig
redisp_act_orig_cut[new_p == 0.0] = 0.0
- if np.any(redisp_act_orig_cut != redisp_act_orig):
+ if (redisp_act_orig_cut != redisp_act_orig).any():
info_.append(
{
"INFO: redispatching cut because generator will be turned_off": np.where(
@@ -1804,7 +1803,7 @@ def _make_redisp(self, already_modified_gen, new_p):
mismatch = self._actual_dispatch - self._target_dispatch
mismatch = np.abs(mismatch)
if (
- np.abs(np.sum(self._actual_dispatch)) >= self._tol_poly
+ np.abs((self._actual_dispatch).sum()) >= self._tol_poly
or np.max(mismatch) >= self._tol_poly
or np.abs(self._amount_storage) >= self._tol_poly
or np.abs(self._sum_curtailment_mw) >= self._tol_poly
@@ -1886,7 +1885,7 @@ def _compute_dispatch_vect(self, already_modified_gen, new_p):
)
already_modified_gen_me = already_modified_gen[gen_participating]
target_vals_me = target_vals[already_modified_gen_me]
- nb_dispatchable = np.sum(gen_participating)
+ nb_dispatchable = gen_participating.sum()
tmp_zeros = np.zeros((1, nb_dispatchable), dtype=dt_float)
coeffs = 1.0 / (
self.gen_max_ramp_up + self.gen_max_ramp_down + self._epsilon_poly
@@ -1909,7 +1908,7 @@ def _compute_dispatch_vect(self, already_modified_gen, new_p):
# see https://stackoverflow.com/questions/11155721/positive-directional-derivative-for-linesearch
# where they advised to scale the function
- scale_objective = max(0.5 * np.sum(np.abs(target_vals_me_optim)) ** 2, 1.0)
+ scale_objective = max(0.5 * np.abs(target_vals_me_optim).sum() ** 2, 1.0)
scale_objective = np.round(scale_objective, decimals=4)
scale_objective = dt_float(scale_objective)
@@ -1969,8 +1968,8 @@ def _compute_dispatch_vect(self, already_modified_gen, new_p):
# choose a good initial point (close to the solution)
# the idea here is to choose an initial point that would be close to the
# desired solution (split the (sum of the) dispatch to the available generators)
- x0 = np.zeros(np.sum(gen_participating))
- if np.any(self._target_dispatch != 0.) or np.any(already_modified_gen):
+ x0 = np.zeros(gen_participating.sum())
+ if (self._target_dispatch != 0.).any() or already_modified_gen.any():
gen_for_x0 = self._target_dispatch[gen_participating] != 0.
gen_for_x0 |= already_modified_gen[gen_participating]
x0[gen_for_x0] = (
@@ -1984,9 +1983,9 @@ def _compute_dispatch_vect(self, already_modified_gen, new_p):
# in this "if" block I set the other component of x0 to
# their "right" value
can_adjust = (x0 == 0.0)
- if np.any(can_adjust):
- init_sum = np.sum(x0)
- denom_adjust = np.sum(1.0 / weights[can_adjust])
+ if can_adjust.any():
+ init_sum = x0.sum()
+ denom_adjust = (1.0 / weights[can_adjust]).sum()
if denom_adjust <= 1e-2:
# i don't want to divide by something too close to 0.
denom_adjust = 1.0
@@ -2064,10 +2063,10 @@ def _detect_infeasible_dispatch(self, incr_in_chronics, avail_down, avail_up):
"""This function is an attempt to give more detailed log by detecting infeasible dispatch"""
except_ = None
sum_move = (
- np.sum(incr_in_chronics) + self._amount_storage - self._sum_curtailment_mw
+ incr_in_chronics.sum() + self._amount_storage - self._sum_curtailment_mw
)
- avail_down_sum = np.sum(avail_down)
- avail_up_sum = np.sum(avail_up)
+ avail_down_sum = avail_down.sum()
+ avail_up_sum = avail_up.sum()
gen_setpoint = self._gen_activeprod_t_redisp[self.gen_redispatchable]
if sum_move > avail_up_sum:
# infeasible because too much is asked
@@ -2237,11 +2236,10 @@ def _handle_updown_times(self, gen_up_before, redisp_act):
gen_still_connected = gen_up_before & gen_up_after
gen_still_disconnected = (~gen_up_before) & (~gen_up_after)
- if (
- np.any(
+ if ((
self._gen_downtime[gen_connected_this_timestep]
< self.gen_min_downtime[gen_connected_this_timestep]
- )
+ ).any()
and not self._ignore_min_up_down_times
):
# i reconnected a generator before the minimum time allowed
@@ -2260,10 +2258,10 @@ def _handle_updown_times(self, gen_up_before, redisp_act):
self._gen_uptime[gen_connected_this_timestep] = 1
if (
- np.any(
+ (
self._gen_uptime[gen_disconnected_this]
< self.gen_min_uptime[gen_disconnected_this]
- )
+ ).any()
and not self._ignore_min_up_down_times
):
# i disconnected a generator before the minimum time allowed
@@ -2285,13 +2283,19 @@ def _handle_updown_times(self, gen_up_before, redisp_act):
self._gen_downtime[gen_still_disconnected] += 1
return except_
- def get_obs(self, _update_state=True):
+ def get_obs(self, _update_state=True, _do_copy=True):
"""
Return the observations of the current environment made by the :class:`grid2op.Agent.BaseAgent`.
.. note::
This function is called twice when the env is reset, otherwise once per step
+ _do_copy :
+ .. versionadded:: 1.9.2
+
+ Whether or not to make a copy of the returned observation. By default it makes one. Be aware that
+ skipping this copy might cause trouble if used incorrectly.
+
Returns
-------
res: :class:`grid2op.Observation.BaseObservation`
@@ -2328,8 +2332,10 @@ def get_obs(self, _update_state=True):
self._last_obs = self._observation_space(
env=self, _update_state=_update_state
)
-
- return self._last_obs.copy()
+ if _do_copy:
+ return copy.deepcopy(self._last_obs)
+ else:
+ return self._last_obs
def get_thermal_limit(self):
"""
@@ -2420,10 +2426,10 @@ def _compute_storage(self, action_storage_power):
coeff_p_to_E = (
self.delta_time_seconds / 3600.0
) # TODO optim this is const for all time steps
- if np.any(storage_act):
+ if storage_act.any():
modif = True
this_act_stor = action_storage_power[storage_act]
- eff_ = np.ones(np.sum(storage_act))
+ eff_ = np.ones(storage_act.sum())
if self._parameters.ACTIVATE_STORAGE_LOSS:
fill_storage = (
this_act_stor > 0.0
@@ -2446,7 +2452,7 @@ def _compute_storage(self, action_storage_power):
if modif:
# indx when there is too much energy on the battery
indx_too_high = self._storage_current_charge > self.storage_Emax
- if np.any(indx_too_high):
+ if indx_too_high.any():
delta_ = (
self._storage_current_charge[indx_too_high]
- self.storage_Emax[indx_too_high]
@@ -2458,7 +2464,7 @@ def _compute_storage(self, action_storage_power):
# indx when there is not enough energy on the battery
indx_too_low = self._storage_current_charge < self.storage_Emin
- if np.any(indx_too_low):
+ if indx_too_low.any():
delta_ = (
self._storage_current_charge[indx_too_low]
- self.storage_Emin[indx_too_low]
@@ -2473,7 +2479,7 @@ def _compute_storage(self, action_storage_power):
)
# storage is "load convention", dispatch is "generator convention"
# i need the generator to have the same sign as the action on the batteries
- self._amount_storage = np.sum(self._storage_power)
+ self._amount_storage = self._storage_power.sum()
else:
# battery effect should be removed, so i multiply it by -1.
self._amount_storage = 0.0
@@ -2510,10 +2516,10 @@ def _compute_max_ramp_this_step(self, new_p):
self.gen_pmin[self.gen_redispatchable],
)
- max_total_up = np.sum(th_max - new_p[self.gen_redispatchable])
- max_total_down = np.sum(
+ max_total_up = (th_max - new_p[self.gen_redispatchable]).sum()
+ max_total_down = (
th_min - new_p[self.gen_redispatchable]
- ) # TODO is that it ?
+ ).sum() # TODO is that it ?
return max_total_down, max_total_up
def _aux_update_curtail_env_act(self, new_p):
@@ -2543,7 +2549,7 @@ def _aux_compute_new_p_curtailment(self, new_p, curtailment_vect):
def _aux_handle_curtailment_without_limit(self, action, new_p):
"""modifies the new_p argument !!!! (but not the action)"""
if self.redispatching_unit_commitment_availble and (
- action._modif_curtailment or np.any(self._limit_curtailment != 1.0)
+ action._modif_curtailment or (self._limit_curtailment != 1.0).any()
):
self._aux_update_curtailment_act(action)
@@ -2552,8 +2558,8 @@ def _aux_handle_curtailment_without_limit(self, action, new_p):
)
tmp_sum_curtailment_mw = dt_float(
- np.sum(new_p[gen_curtailed])
- - np.sum(self._gen_before_curtailment[gen_curtailed])
+ new_p[gen_curtailed].sum()
+ - self._gen_before_curtailment[gen_curtailed].sum()
)
self._sum_curtailment_mw = (
@@ -2653,7 +2659,7 @@ def _aux_limit_curtail_storage_if_needed(self, new_p, new_p_th, gen_curtailed):
avail_up = np.minimum(p_max_up, self.gen_max_ramp_up[gen_redisp])
sum_move = (
- np.sum(normal_increase) + self._amount_storage - self._sum_curtailment_mw
+ normal_increase.sum() + self._amount_storage - self._sum_curtailment_mw
)
total_storage_curtail = self._amount_storage - self._sum_curtailment_mw
update_env_act = False
@@ -2661,19 +2667,19 @@ def _aux_limit_curtail_storage_if_needed(self, new_p, new_p_th, gen_curtailed):
if abs(total_storage_curtail) >= self._tol_poly:
# if there is an impact on the curtailment / storage (otherwise I cannot fix anything)
too_much = 0.0
- if sum_move > np.sum(avail_up):
+ if sum_move > avail_up.sum():
# I need to limit curtailment (not enough ramps up available)
- too_much = dt_float(sum_move - np.sum(avail_up) + self._tol_poly)
+ too_much = dt_float(sum_move - avail_up.sum() + self._tol_poly)
self._limited_before = too_much
- elif sum_move < np.sum(avail_down):
+ elif sum_move < avail_down.sum():
# I need to limit storage unit (not enough ramps down available)
- too_much = dt_float(sum_move - np.sum(avail_down) - self._tol_poly)
+ too_much = dt_float(sum_move - avail_down.sum() - self._tol_poly)
self._limited_before = too_much
elif np.abs(self._limited_before) >= self._tol_poly:
# adjust the "mess" I did before by not curtailing enough
# max_action = self.gen_pmax[gen_curtailed] * self._limit_curtailment[gen_curtailed]
update_env_act = True
- too_much = min(np.sum(avail_up) - self._tol_poly, self._limited_before)
+ too_much = min(avail_up.sum() - self._tol_poly, self._limited_before)
self._limited_before -= too_much
too_much = self._limited_before
@@ -2869,7 +2875,6 @@ def _update_alert_properties(self, action, lines_attacked, subs_attacked):
self._time_since_last_attack[~mask_first_ts_attack & (self._time_since_last_attack != -1)] += 1
# update the time already attacked
- self._is_already_attacked[lines_attacked_al] = False
self._is_already_attacked[lines_attacked_al] = True
else:
self._time_since_last_attack[self._time_since_last_attack != -1] += 1
@@ -2947,7 +2952,7 @@ def _aux_register_env_converged(self, disc_lines, action, init_line_status, new_
# finally, build the observation (it's a different one at each step, we cannot reuse the same one)
# THIS SHOULD BE DONE AFTER EVERYTHING IS INITIALIZED !
- self.current_obs = self.get_obs()
+ self.current_obs = self.get_obs(_do_copy=False)
# TODO storage: get back the result of the storage ! with the illegal action when a storage unit
# TODO is non zero and disconnected, this should be ok.
self._time_extract_obs += time.perf_counter() - beg_res
@@ -3260,7 +3265,7 @@ def step(self, action: BaseAction) -> Tuple[BaseObservation, float, bool, dict]:
self._is_alert_used_in_reward = (
self._reward_helper.template_reward.is_alert_used
)
- self.current_obs = self.get_obs(_update_state=False)
+ self.current_obs = self.get_obs(_update_state=False, _do_copy=False)
# update the observation so when it's plotted everything is "shutdown"
self.current_obs.set_game_over(self)
@@ -3738,7 +3743,7 @@ def parameters(self):
.. code-block:: python
- env.params.WHATEVER = NEW_VALUE
+ env.params.WHATEVER = NEW_VALUE # no effect!
This will have absolutely no impact.
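Note: the `_do_copy` flag introduced above is what allows `_aux_register_env_converged` and the game-over branch of
`step` to reuse the internal observation without a deep copy. A minimal sketch of the contract, assuming `env` is an
already-created grid2op environment (the flag is internal, shown for illustration only):

    obs_copy = env.get_obs()               # default: a deep copy, safe to keep and mutate
    obs_ref = env.get_obs(_do_copy=False)  # fast path: aliases the env's own observation
    assert obs_copy is not obs_ref         # mutating obs_ref would corrupt the env state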
diff --git a/grid2op/Environment/BaseMultiProcessEnv.py b/grid2op/Environment/baseMultiProcessEnv.py
similarity index 99%
rename from grid2op/Environment/BaseMultiProcessEnv.py
rename to grid2op/Environment/baseMultiProcessEnv.py
index 8c6130a75..2d09ef88e 100644
--- a/grid2op/Environment/BaseMultiProcessEnv.py
+++ b/grid2op/Environment/baseMultiProcessEnv.py
@@ -14,7 +14,7 @@
from grid2op.dtypes import dt_int
from grid2op.Exceptions import Grid2OpException, MultiEnvException
from grid2op.Space import GridObjects
-from grid2op.Environment import Environment
+from grid2op.Environment.environment import Environment
from grid2op.Action import BaseAction
@@ -158,7 +158,7 @@ def run(self):
data = self.env.action_space.from_vect(data)
obs, reward, done, info = self.env.step(data)
obs_v = obs.to_vect()
- if done or np.any(~np.isfinite(obs_v)):
+ if done or (~np.isfinite(obs_v)).any():
# if done do a reset
res_obs = self.get_obs_ifnotconv()
elif self._obs_to_vect:
diff --git a/grid2op/Environment/Environment.py b/grid2op/Environment/environment.py
similarity index 99%
rename from grid2op/Environment/Environment.py
rename to grid2op/Environment/environment.py
index 00ffac583..ca77cb4f9 100644
--- a/grid2op/Environment/Environment.py
+++ b/grid2op/Environment/environment.py
@@ -12,7 +12,7 @@
import re
import grid2op
-from grid2op.Opponent.OpponentSpace import OpponentSpace
+from grid2op.Opponent import OpponentSpace
from grid2op.dtypes import dt_float, dt_bool, dt_int
from grid2op.Action import (
ActionSpace,
@@ -28,7 +28,7 @@
from grid2op.Backend import Backend
from grid2op.Chronics import ChronicsHandler
from grid2op.VoltageControler import ControlVoltageFromFile, BaseVoltageController
-from grid2op.Environment.BaseEnv import BaseEnv
+from grid2op.Environment.baseEnv import BaseEnv
from grid2op.Opponent import BaseOpponent, NeverAttackBudget
from grid2op.operator_attention import LinearAttentionBudget
@@ -512,7 +512,7 @@ def _handle_compat_glop_version(self, need_process_backend):
# deals with the "sub_pos" vector
for sub_id in range(cls_bk.n_sub):
- if np.any(cls_bk.storage_to_subid == sub_id):
+ if (cls_bk.storage_to_subid == sub_id).any():
stor_ids = np.where(cls_bk.storage_to_subid == sub_id)[0]
stor_locs = cls_bk.storage_to_sub_pos[stor_ids]
for stor_loc in sorted(stor_locs, reverse=True):
@@ -534,7 +534,7 @@ def _handle_compat_glop_version(self, need_process_backend):
# remove storage from the number of element in the substation
for sub_id in range(cls_bk.n_sub):
- cls_bk.sub_info[sub_id] -= np.sum(cls_bk.storage_to_subid == sub_id)
+ cls_bk.sub_info[sub_id] -= (cls_bk.storage_to_subid == sub_id).sum()
# remove storage from the total number of element
cls_bk.dim_topo -= cls_bk.n_storage
diff --git a/grid2op/Environment/MultiEnvMultiProcess.py b/grid2op/Environment/multiEnvMultiProcess.py
similarity index 97%
rename from grid2op/Environment/MultiEnvMultiProcess.py
rename to grid2op/Environment/multiEnvMultiProcess.py
index 1e351339c..647113740 100644
--- a/grid2op/Environment/MultiEnvMultiProcess.py
+++ b/grid2op/Environment/multiEnvMultiProcess.py
@@ -11,7 +11,7 @@
from grid2op.dtypes import dt_int
from grid2op.Exceptions import Grid2OpException, MultiEnvException
from grid2op.Space import GridObjects
-from grid2op.Environment.BaseMultiProcessEnv import BaseMultiProcessEnvironment
+from grid2op.Environment.baseMultiProcessEnv import BaseMultiProcessEnvironment
from grid2op.Action import BaseAction
@@ -67,14 +67,14 @@ def __init__(self, envs, nb_envs, obs_as_class=True, return_info=True, logger=No
'convert it to such with error "{}"'.format(exc_)
)
- if np.any(nb_envs < 0):
+ if (nb_envs < 0).any():
raise MultiEnvException(
'You ask to perform "{}" copy of an environment. This is a negative '
'integer. I cannot do that. Please make sure "nb_envs" argument '
"is all made of strictly positive integers and not {}."
"".format(np.min(nb_envs), nb_envs)
)
- if np.any(nb_envs == 0):
+ if (nb_envs == 0).any():
raise MultiEnvException(
"You ask to perform 0 copy of an environment. This is not supported at "
'the moment. Please make sure "nb_envs" argument '
diff --git a/grid2op/Environment/MultiMixEnv.py b/grid2op/Environment/multiMixEnv.py
similarity index 100%
rename from grid2op/Environment/MultiMixEnv.py
rename to grid2op/Environment/multiMixEnv.py
diff --git a/grid2op/Environment/SingleEnvMultiProcess.py b/grid2op/Environment/singleEnvMultiProcess.py
similarity index 98%
rename from grid2op/Environment/SingleEnvMultiProcess.py
rename to grid2op/Environment/singleEnvMultiProcess.py
index 940a771dd..f648b7e59 100644
--- a/grid2op/Environment/SingleEnvMultiProcess.py
+++ b/grid2op/Environment/singleEnvMultiProcess.py
@@ -8,7 +8,7 @@
import numpy as np
-from grid2op.Environment.BaseMultiProcessEnv import BaseMultiProcessEnvironment
+from grid2op.Environment.baseMultiProcessEnv import BaseMultiProcessEnvironment
class SingleEnvMultiProcess(BaseMultiProcessEnvironment):
diff --git a/grid2op/Environment/timedOutEnv.py b/grid2op/Environment/timedOutEnv.py
index acf41e37e..fcccd7641 100644
--- a/grid2op/Environment/timedOutEnv.py
+++ b/grid2op/Environment/timedOutEnv.py
@@ -9,7 +9,7 @@
import time
from math import floor
from typing import Tuple, Union, List
-from grid2op.Environment.Environment import Environment
+from grid2op.Environment.environment import Environment
from grid2op.Action import BaseAction
from grid2op.Observation import BaseObservation
from grid2op.Exceptions import EnvError
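Note: the `grid2op/Environment/*.py` renames in this patch change file names only, not class names, so package-level
imports keep working unchanged. A short sketch:

    # stable: import from the package, not from the module file
    from grid2op.Environment import Environment, BaseEnv

    # brittle: the module file itself was renamed by this patch
    # from grid2op.Environment.Environment import Environment   # old file name
    # from grid2op.Environment.environment import Environment   # new file name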
diff --git a/grid2op/Observation/__init__.py b/grid2op/Observation/__init__.py
index 25548b022..fda4d0188 100644
--- a/grid2op/Observation/__init__.py
+++ b/grid2op/Observation/__init__.py
@@ -12,9 +12,11 @@
"NoisyObservation",
"BaseObservation",
"ObservationSpace",
+ "HighResSimCounter",
]
from grid2op.Observation.completeObservation import CompleteObservation
from grid2op.Observation.noisyObservation import NoisyObservation
from grid2op.Observation.baseObservation import BaseObservation
from grid2op.Observation.observationSpace import ObservationSpace
+from grid2op.Observation.highresSimCounter import HighResSimCounter
diff --git a/grid2op/Observation/baseObservation.py b/grid2op/Observation/baseObservation.py
index 7f95ff27c..ca2aa11ae 100644
--- a/grid2op/Observation/baseObservation.py
+++ b/grid2op/Observation/baseObservation.py
@@ -297,13 +297,20 @@ class BaseObservation(GridObjects):
active_alert: :class:`numpy.ndarray`, dtype:bool
.. warning:: Only available if the environment supports the "alert" feature (*eg* "l2rpn_idf_2023").
- This function gives the lines "under alert" at the given observation.
+ .. seealso:: :ref:`grid2op-alert-module` section of the doc for more information
+
+ .. versionadded:: 1.9.1
+
+ This attribute gives the lines "under alert" at the given observation.
It is only relevant for the "real" environment and not for `obs.simulate` nor `obs.get_forecast_env`
- active_alert time_since_last_alert alert_duration total_number_of_alert time_since_last_attack was_alert_used_after_attack
time_since_last_alert: :class:`numpy.ndarray`, dtype:int
.. warning:: Only available if the environment supports the "alert" feature (*eg* "l2rpn_idf_2023").
+ .. seealso:: :ref:`grid2op-alert-module` section of the doc for more information
+
+ .. versionadded:: 1.9.1
+
Give the time since an alert has been raised for each powerline. If you just raise an
alert for attackable line `i` then obs.time_since_last_alert[i] = 0 (and counter
increase by 1 each step).
@@ -313,21 +320,33 @@ class BaseObservation(GridObjects):
alert_duration: :class:`numpy.ndarray`, dtype:int
.. warning:: Only available if the environment supports the "alert" feature (*eg* "l2rpn_idf_2023").
+ .. seealso:: :ref:`grid2op-alert-module` section of the doc for more information
+
+ .. versionadded:: 1.9.1
+
Give the time since an alert has started for all attackable line. If you just raise an
alert for attackable line `i` then obs.time_since_last_alert[i] = 1 and this counter
increase by 1 each step as long as the agent continues to "raise an alert on attackable line i"
When the attackable line `i` is not under an alert then obs.time_since_last_alert[i] = 0
- total_number_of_alerts: :class:`numpy.ndarray`, dtype:int
+ total_number_of_alert: :class:`numpy.ndarray`, dtype:int
.. warning:: Only available if the environment supports the "alert" feature (*eg* "l2rpn_idf_2023").
- This function counts, since the beginning of the current episode, the total number
+ .. seealso:: :ref:`grid2op-alert-module` section of the doc for more information
+
+ .. versionadded:: 1.9.1
+
+ This attribute stores, since the beginning of the current episode, the total number
of alerts (here 1 alert = one alert for 1 powerline for 1 step) sent by the agent.
time_since_last_attack: :class:`numpy.ndarray`, dtype:int
.. warning:: Only available if the environment supports the "alert" feature (*eg* "l2rpn_idf_2023").
+ .. seealso:: :ref:`grid2op-alert-module` section of the doc for more information
+
+ .. versionadded:: 1.9.1
+
Similar to `time_since_last_alert` but for the attack.
For each attackable line `i` it counts the number of steps since the powerline has
@@ -355,7 +374,11 @@ class BaseObservation(GridObjects):
This attribute is only filled
if you use a compatible reward (*eg* :class:`grid2op.Reward.AlertReward`)
as the main reward (or a "combined" reward with this reward being part of it)
-
+
+ .. seealso:: :ref:`grid2op-alert-module` section of the doc for more information
+
+ .. versionadded:: 1.9.1
+
For each attackable line `i` it says:
- obs.was_alert_used_after_attack[i] = 0 => attackable line i has not been attacked
@@ -371,6 +394,10 @@ class BaseObservation(GridObjects):
attack_under_alert: :class:`numpy.ndarray`, dtype:int
.. warning:: Only available if the environment supports the "alert" feature (*eg* "l2rpn_idf_2023").
+ .. seealso:: :ref:`grid2op-alert-module` section of the doc for more information
+
+ .. versionadded:: 1.9.1
+
For each attackable line `i` it says:
- obs.attack_under_alert[i] = 0 => attackable line i has not been attacked OR it
@@ -965,10 +992,10 @@ def state_of(
)
)
- beg_ = int(np.sum(self.sub_info[:substation_id]))
+ beg_ = int(self.sub_info[:substation_id].sum())
end_ = int(beg_ + self.sub_info[substation_id])
topo_sub = self.topo_vect[beg_:end_]
- if np.any(topo_sub > 0):
+ if (topo_sub > 0).any():
nb_bus = (
np.max(topo_sub[topo_sub > 0]) - np.min(topo_sub[topo_sub > 0]) + 1
)
@@ -1373,7 +1400,7 @@ def __compare_stats(self, other, name):
# first special case: there can be Nan there
me_finite = np.isfinite(attr_me)
oth_finite = np.isfinite(attr_other)
- if np.any(me_finite != oth_finite):
+ if (me_finite != oth_finite).any():
return False
# special case of floating points, otherwise vector are never equal
if not np.all(
@@ -1511,7 +1538,7 @@ def where_different(self, other):
for attr_nm in self._attr_eq:
array_ = getattr(diff_, attr_nm)
if array_.dtype == dt_bool:
- if np.any(~array_):
+ if (~array_).any():
res.append(attr_nm)
else:
if (array_.shape[0] > 0) and np.max(np.abs(array_)):
@@ -1997,13 +2024,13 @@ def flow_bus_matrix(self, active_flow=True, as_csr_matrix=False):
if self.shunts_data_available:
sh_vect = self._shunt_q
- nb_lor = np.sum(lor_conn)
- nb_lex = np.sum(lex_conn)
+ nb_lor = lor_conn.sum()
+ nb_lex = lex_conn.sum()
data = np.zeros(nb_bus + nb_lor + nb_lex, dtype=dt_float)
# if two generators / loads / storage unit are connected at the same bus
# this is why i go with matrix product and sparse matrices
- nb_prod = np.sum(prod_conn)
+ nb_prod = prod_conn.sum()
if nb_prod:
bus_prod = np.arange(prod_bus[prod_conn].max() + 1)
map_mat = csr_matrix(
@@ -2014,7 +2041,7 @@ def flow_bus_matrix(self, active_flow=True, as_csr_matrix=False):
data[bus_prod] += map_mat.dot(prod_vect[prod_conn])
# handle load
- nb_load = np.sum(load_conn)
+ nb_load = load_conn.sum()
if nb_load:
bus_load = np.arange(load_bus[load_conn].max() + 1)
map_mat = csr_matrix(
@@ -2025,7 +2052,7 @@ def flow_bus_matrix(self, active_flow=True, as_csr_matrix=False):
data[bus_load] -= map_mat.dot(load_vect[load_conn])
# handle storage
- nb_stor = np.sum(stor_conn)
+ nb_stor = stor_conn.sum()
if nb_stor:
bus_stor = np.arange(stor_bus[stor_conn].max() + 1)
map_mat = csr_matrix(
@@ -2037,7 +2064,7 @@ def flow_bus_matrix(self, active_flow=True, as_csr_matrix=False):
if self.shunts_data_available:
# handle shunts
- nb_shunt = np.sum(sh_conn)
+ nb_shunt = sh_conn.sum()
if nb_shunt:
bus_shunt = np.arange(sh_bus[sh_conn].max() + 1)
map_mat = csr_matrix(
@@ -3522,7 +3549,7 @@ def add_act(self, act, issue_warn=True):
Returns
-------
- res: :class:`grid2op.Observation.Observation`
+ res: :class:`grid2op.Observation.BaseObservation`
The resulting observation. Note that this observation is not initialized with everything.
It is only relevant when you want to study the resulting topology after you applied an
action. Lots of `res` attributes are empty.
@@ -3605,7 +3632,7 @@ def add_act(self, act, issue_warn=True):
& (line_ex_set_bus <= 0)
& (res.topo_vect[self.line_ex_pos_topo_vect] == -1)
)
- if np.any(tmp):
+ if tmp.any():
id_issue_ex = np.where(tmp)[0]
if issue_warn:
warnings.warn(error_no_bus_set.format(id_issue_ex))
@@ -3617,7 +3644,7 @@ def add_act(self, act, issue_warn=True):
& (line_or_set_bus <= 0)
& (res.topo_vect[self.line_or_pos_topo_vect] == -1)
)
- if np.any(tmp):
+ if tmp.any():
id_issue_or = np.where(tmp)[0]
if issue_warn:
warnings.warn(error_no_bus_set.format(id_issue_or))
@@ -3675,7 +3702,7 @@ def add_act(self, act, issue_warn=True):
res.line_status[disco_line] = False
# handle reconnected powerlines
- if np.any(reco_line):
+ if reco_line.any():
if "set_bus" in act.authorized_keys:
line_ex_set_bus = 1 * act.line_ex_set_bus
line_or_set_bus = 1 * act.line_or_set_bus
@@ -3684,8 +3711,8 @@ def add_act(self, act, issue_warn=True):
line_or_set_bus = np.zeros(res.n_line, dtype=dt_int)
if issue_warn and (
- np.any(line_or_set_bus[reco_line] == 0)
- or np.any(line_ex_set_bus[reco_line] == 0)
+ (line_or_set_bus[reco_line] == 0).any()
+ or (line_ex_set_bus[reco_line] == 0).any()
):
warnings.warn(
'A powerline has been reconnected with a "change_status" action without '
@@ -3707,7 +3734,7 @@ def add_act(self, act, issue_warn=True):
if "redispatch" in act.authorized_keys:
redisp = act.redispatch
- if np.any(redisp != 0) and issue_warn:
+ if (redisp != 0).any() and issue_warn:
warnings.warn(
"You did redispatching on this action. Redispatching is heavily transformed "
"by the environment (consult the documentation about the modeling of the "
@@ -3716,7 +3743,7 @@ def add_act(self, act, issue_warn=True):
if "set_storage" in act.authorized_keys:
storage_p = act.storage_p
- if np.any(storage_p != 0) and issue_warn:
+ if (storage_p != 0).any() and issue_warn:
warnings.warn(
"You did action on storage units in this action. This implies performing some "
"redispatching which is heavily transformed "
@@ -4438,7 +4465,7 @@ def update_after_reward(self, env):
You probably don't have to use except if you develop a specific
observation class !
- .. info::
+ .. note::
If you want to develop a new type of observation with a new type of reward, you can use the
`env._reward_to_obs` attribute (dictionary) in the reward to pass information to the
observation (in this function).
diff --git a/grid2op/Observation/completeObservation.py b/grid2op/Observation/completeObservation.py
index e74273199..201e94f00 100644
--- a/grid2op/Observation/completeObservation.py
+++ b/grid2op/Observation/completeObservation.py
@@ -67,9 +67,9 @@ class CompleteObservation(BaseObservation):
26. :attr:`BaseObservation.time_before_cooldown_sub` representation of the cooldown time on the substations
[:attr:`grid2op.Space.GridObjects.n_sub` elements]
27. :attr:`BaseObservation.time_next_maintenance` number of timestep before the next maintenance (-1 means
- no maintenance are planned, 0 a maintenance is in operation) [:attr:`BaseObservation.n_line` elements]
+ no maintenance are planned, 0 a maintenance is in operation) [:attr:`grid2op.Space.GridObjects.n_line` elements]
28. :attr:`BaseObservation.duration_next_maintenance` duration of the next maintenance. If a maintenance
- is taking place, this is the number of timestep before it ends. [:attr:`BaseObservation.n_line` elements]
+ is taking place, this is the number of timestep before it ends. [:attr:`grid2op.Space.GridObjects.n_line` elements]
29. :attr:`BaseObservation.target_dispatch` the target dispatch for each generator
[:attr:`grid2op.Space.GridObjects.n_gen` elements]
30. :attr:`BaseObservation.actual_dispatch` the actual dispatch for each generator
@@ -107,18 +107,32 @@ class CompleteObservation(BaseObservation):
constraint) [``bool``]
43. :attr:`BaseObservation.curtailment_limit` : the current curtailment limit (if any)
[:attr:`grid2op.Space.GridObjects.n_gen` elements]
- 44. :attr:`BaseObservation.curtailment_limit_effective` TODO
- 45. :attr:`BaseObservation.current_step` TODO
- 46. :attr:`BaseObservation.max_step` TODO
- 47. :attr:`BaseObservation.delta_time` TODO
- 48. :attr:`BaseObservation.gen_margin_up` TODO
- 49. :attr:`BaseObservation.gen_margin_down` TODO
- 50. :attr:`BaseObservation.last_alert` TODO
- 51. :attr:`BaseObservation.time_since_last_alert` TODO
- 52. :attr:`BaseObservation.alert_duration` TODO
- 53. :attr:`BaseObservation.total_number_of_alert` TODO
- 54. :attr:`BaseObservation.time_since_last_attack` TODO
- 55. :attr:`BaseObservation.was_alert_used_after_attack` TODO
+ 44. :attr:`BaseObservation.curtailment_limit_effective` Limit (in ratio of gen_pmax) effectively imposed on
+ each renewable generator by the environment.
+ 45. :attr:`BaseObservation.current_step` the number of steps since the beginning of the episode (it's
+ 0 for the observation after a call to `env.reset()`)
+ 46. :attr:`BaseObservation.max_step` maximum number of steps that can be done by the environment.
+ When :attr:`BaseObservation.current_step` is :attr:`BaseObservation.max_step` the environment
+ is done.
+ 47. :attr:`BaseObservation.delta_time` Amount of time (in minutes) represented by a step. In general,
+ the equivalent of 5 minutes elapses between two steps.
+ 48. :attr:`BaseObservation.gen_margin_up` By how much you can increase each generator's production between this
+ step and the next.
+ 49. :attr:`BaseObservation.gen_margin_down` By how much you can decrease each generator's production between this
+ step and the next.
+ 50. :attr:`BaseObservation.active_alert` This attribute gives the lines "under alert" at the given observation.
+ 51. :attr:`BaseObservation.time_since_last_alert` Gives the time since an alert has been raised for each powerline.
+ 52. :attr:`BaseObservation.alert_duration` Gives the time since an alert has started for each attackable line.
+ 53. :attr:`BaseObservation.total_number_of_alert` Total number of alerts sent by the agent since the beginning of
+ the episode.
+ 54. :attr:`BaseObservation.time_since_last_attack` For each attackable line `i` it counts the number of steps since
+ the powerline has been attacked.
+ 55. :attr:`BaseObservation.was_alert_used_after_attack` For each attackable line `i` it says if an alert has been used or not
+ for the computation of the reward: +1 means "used and the alert was correct", -1 means "used and the alert was not correct"
+ and 0 means "not used".
+ 56. :attr:`BaseObservation.attack_under_alert` For each attackable line `i` currently under attack, it says if an alert
+ has been sent (+1) or not (-1).
+
"""
attr_list_vect = [
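Note: the alert attributes documented in items 50 to 56 above are plain numpy vectors on the observation. A short
access sketch, assuming an alert-enabled environment such as "l2rpn_idf_2023":

    import grid2op

    env = grid2op.make("l2rpn_idf_2023", test=True)
    obs = env.reset()

    obs.active_alert            # bool, per attackable line: currently "under alert"
    obs.time_since_last_alert   # int, per attackable line: steps since an alert was raised
    obs.alert_duration          # int, per attackable line: length of the ongoing alert
    obs.total_number_of_alert   # int: alerts sent since the beginning of the episode
    obs.time_since_last_attack  # int, per attackable line: steps since it was attacked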
diff --git a/grid2op/Observation/observationSpace.py b/grid2op/Observation/observationSpace.py
index 0cb64871f..db81a8991 100644
--- a/grid2op/Observation/observationSpace.py
+++ b/grid2op/Observation/observationSpace.py
@@ -40,18 +40,18 @@ class ObservationSpace(SerializableObservationSpace):
_simulate_parameters: :class:`grid2op.Parameters.Parameters`
Type of Parameters used to compute powerflow for the forecast.
- rewardClass: ``type``
+ rewardClass: Union[type, BaseReward]
Class used by the :class:`grid2op.Environment.Environment` to send information about its state to the
- :class:`grid2op.BaseAgent.BaseAgent`. You can change this class to differentiate between the reward of output of
+ :class:`grid2op.Agent.BaseAgent`. You can change this class to differentiate between the reward of output of
:func:`BaseObservation.simulate` and the reward used to train the BaseAgent.
action_helper_env: :class:`grid2op.Action.ActionSpace`
BaseAction space used to create action during the :func:`BaseObservation.simulate`
- reward_helper: :class:`grid2op.Reward.HelperReward`
+ reward_helper: :class:`grid2op.Reward.RewardHelper`
BaseReward function used by the the :func:`BaseObservation.simulate` function.
- obs_env: :class:`_ObsEnv`
+ obs_env: :class:`grid2op.Environment._obsEnv._ObsEnv`
Instance of the environment used by the BaseObservation Helper to provide forecasts of the grid state.
_empty_obs: :class:`BaseObservation`
@@ -78,11 +78,11 @@ def __init__(
.. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
- Env: requires :attr:`grid2op.Environment.parameters` and :attr:`grid2op.Environment.backend` to be valid
+ Env: requires :attr:`grid2op.Environment.BaseEnv.parameters` and :attr:`grid2op.Environment.BaseEnv.backend` to be valid
"""
# lazy import to prevent circular references (Env -> Observation -> Obs Space -> _ObsEnv -> Env)
- from grid2op.Environment._ObsEnv import _ObsEnv
+ from grid2op.Environment._obsEnv import _ObsEnv
if actionClass is None:
from grid2op.Action import CompleteAction
@@ -123,6 +123,8 @@ def __init__(
if _with_obs_env:
self._create_obs_env(env)
self.reward_helper.initialize(self.obs_env)
+ for k, v in self.obs_env.other_rewards.items():
+ v.reset(self.obs_env)
else:
self.with_forecast = False
self.obs_env = None
@@ -148,7 +150,7 @@ def set_real_env_kwargs(self, env):
if not self.with_forecast:
return
# I don't need the backend nor the chronics_handler
- from grid2op.Environment.Environment import Environment
+ from grid2op.Environment import Environment
self._real_env_kwargs = Environment.get_kwargs(env, False, False)
# remove the parameters anyways (the 'forecast parameters will be used
@@ -445,9 +447,9 @@ def reset(self, real_env):
self.__nb_simulate_called_this_step = 0
self.__nb_simulate_called_this_episode = 0
if self.with_forecast:
- self.obs_env._reward_helper.reset(real_env)
+ self.obs_env._reward_helper.reset(self.obs_env)
for k, v in self.obs_env.other_rewards.items():
- v.reset(real_env)
+ v.reset(self.obs_env)
self.obs_env.reset()
self._env_param = copy.deepcopy(real_env.parameters)
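Note: resetting the forecast rewards with `self.obs_env` (instead of `real_env`) is what makes
`BaseReward.is_simulated_env` reliable inside `obs.simulate`. A hedged sketch of a custom reward relying on that
pattern (mirroring `_alarmScore` below; names illustrative):

    from grid2op.Reward import BaseReward

    class MySimulateAwareReward(BaseReward):
        def __init__(self, logger=None):
            super().__init__(logger=logger)
            self._i_am_simulate = False

        def reset(self, env):
            super().reset(env)
            # env is the _ObsEnv when the forecast helper resets, so this flag is now correct
            self._i_am_simulate = self.is_simulated_env(env)

        def __call__(self, action, env, has_error, is_done, is_illegal, is_ambiguous):
            if self._i_am_simulate:
                return 0.0  # neutral value inside obs.simulate
            return -1.0 if has_error else 1.0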
diff --git a/grid2op/Opponent/BaseActionBudget.py b/grid2op/Opponent/BaseActionBudget.py
index d110bd9ef..c9f7c5e3c 100644
--- a/grid2op/Opponent/BaseActionBudget.py
+++ b/grid2op/Opponent/BaseActionBudget.py
@@ -54,5 +54,5 @@ def __call__(self, attack):
"".format(type(attack), self.action_space.actionClass)
)
aff_lines, aff_subs = attack.get_topological_impact()
- cost = np.sum(aff_lines) + np.sum(aff_subs)
+ cost = aff_lines.sum() + aff_subs.sum()
return cost
diff --git a/grid2op/Opponent/WeightedRandomOpponent.py b/grid2op/Opponent/WeightedRandomOpponent.py
index ea970711c..a1e9cd549 100644
--- a/grid2op/Opponent/WeightedRandomOpponent.py
+++ b/grid2op/Opponent/WeightedRandomOpponent.py
@@ -172,7 +172,7 @@ def attack(self, observation, agent_action, env_action, budget, previous_fails):
# If all attackable lines are disconnected, do not attack
status = observation.line_status[self._lines_ids]
- if not np.sum(status):
+ if not status.sum():
return None, 0
available_attacks = self._attacks[status]
diff --git a/grid2op/Plot/BasePlot.py b/grid2op/Plot/BasePlot.py
index f1fb6d98b..c5f9d78f4 100644
--- a/grid2op/Plot/BasePlot.py
+++ b/grid2op/Plot/BasePlot.py
@@ -631,7 +631,7 @@ def _get_topo_coord(self, sub_id, observation, elements):
theta_z = [cmath.phase((el - z_sub)) for el in buses_z]
# try to have nodes "in opposition" to one another
- NN = np.array(nb_co) / np.sum(nb_co)
+ NN = np.array(nb_co) / nb_co.sum()
diff_theta = theta_z[0] - theta_z[1]
# alpha = cmath.pi + diff_theta
alpha = -cmath.pi + diff_theta
diff --git a/grid2op/PlotGrid/BasePlot.py b/grid2op/PlotGrid/BasePlot.py
index c921a2066..365425b17 100644
--- a/grid2op/PlotGrid/BasePlot.py
+++ b/grid2op/PlotGrid/BasePlot.py
@@ -1045,7 +1045,7 @@ def plot_info(
# rescaling to have range 0 - 1.0
tmp = observation.prod_p[np.isfinite(observation.prod_p)]
- if np.any(np.isfinite(observation.prod_p)):
+ if (np.isfinite(observation.prod_p)).any():
observation.prod_p -= (
np.min(tmp) - 1e-1
) # so the min is 1e-1 otherwise 0.0 is plotted as black
diff --git a/grid2op/PlotGrid/PlotPlotly.py b/grid2op/PlotGrid/PlotPlotly.py
index 02c97e58e..db6c0bec3 100644
--- a/grid2op/PlotGrid/PlotPlotly.py
+++ b/grid2op/PlotGrid/PlotPlotly.py
@@ -58,10 +58,12 @@ def __init__(
gen_radius=12,
show_gen_txt=False,
show_load_txt=False,
+ show_storage_txt=False,
):
super().__init__(observation_space, width, height, scale, grid_layout)
self.show_gen_txt = show_gen_txt
self.show_load_txt = show_load_txt
+ self.show_storage_txt = show_storage_txt
self._responsive = responsive
self._sub_radius = sub_radius
self._sub_fill_color = "PaleTurquoise"
@@ -81,6 +83,12 @@ def __init__(
self._gen_line_width = 1
self._gen_prefix = "b"
+ self._storage_radius = 12
+ self._storage_fill_color = "Purple"
+ self._storage_line_color = "black"
+ self._storage_line_width = 1
+ self._storage_prefix = "d"
+
self._line_prefix = "a"
self.line_color_scheme = (
pc.sequential.Blues_r[:4]
@@ -643,12 +651,73 @@ def update_powerline(
def draw_legend(self, figure, observation):
figure.update_layout(showlegend=False)
+
+ def _draw_storage_txt(self, name, pos_x, pos_y, text, text_pos):
+ return go.Scatter(
+ x=[pos_x],
+ y=[pos_y],
+ text=[text],
+ name=name,
+ mode="text",
+ hoverinfo="skip",
+ textposition=text_pos,
+ showlegend=False,
+ )
+
+ def _draw_storage_circle(self, pos_x, pos_y, name, text):
+ marker_dict = dict(
+ size=self._storage_radius,
+ color=self._storage_fill_color,
+ showscale=False,
+ line=dict(width=self._storage_line_width, color=self._storage_line_color),
+ )
+ return go.Scatter(
+ x=[pos_x],
+ y=[pos_y],
+ mode="markers",
+ text=[text],
+ name=self._storage_prefix + name,
+ marker=marker_dict,
+ showlegend=False,
+ )
+
+ def _draw_storage_line(self, pos_x, pos_y, sub_x, sub_y):
+ style_line = dict(color="black", width=self._storage_line_width)
+
+ line_trace = go.Scatter(
+ x=[pos_x, sub_x],
+ y=[pos_y, sub_y],
+ hoverinfo="skip",
+ line=style_line,
+ showlegend=False,
+ )
+ return line_trace
+
+ def _draw_storage_bus(self, pos_x, pos_y, dir_x, dir_y, bus, storage_name):
+ bus = bus if bus > 0 else 0
+ marker_dict = dict(
+ size=self._line_bus_radius,
+ color=self._line_bus_colors[bus],
+ showscale=False,
+ )
+ center_x = pos_x + dir_x * (self._sub_radius - self._line_bus_radius)
+ center_y = pos_y + dir_y * (self._sub_radius - self._line_bus_radius)
+ trace_name = self._storage_prefix + self._bus_prefix + storage_name
+ return go.Scatter(
+ x=[center_x],
+ y=[center_y],
+ marker=marker_dict,
+ name=trace_name,
+ hoverinfo="skip",
+ showlegend=False,
+ )
+
def draw_storage(
self,
figure,
observation,
- storage_name,
storage_id,
+ storage_name,
storage_bus,
storage_value,
storage_unit,
@@ -658,6 +727,48 @@ def draw_storage(
sub_y,
):
# TODO storage doc
- # TODO storage plot
- # TODO update the plotly with storage units
- pass
+ dir_x, dir_y = pltu.norm_from_points(sub_x, sub_y, pos_x, pos_y)
+ nd_x, nd_y = pltu.norm_from_points(sub_x, sub_y, pos_x, pos_y)
+ storage_text = ""
+ if storage_value is not None:
+ txt_x = pos_x + nd_x * (self._storage_radius / 2)
+ txt_y = pos_y + nd_y * (self._storage_radius / 2)
+ text_pos = self._textpos_from_dir(dir_x, dir_y)
+ storage_text = storage_name + "
"
+ storage_text += pltu.format_value_unit(storage_value, storage_unit)
+ if self.show_storage_txt:
+ trace1 = self._draw_storage_txt(storage_name, txt_x, txt_y, storage_text, text_pos)
+ figure.add_trace(trace1)
+ trace2 = self._draw_storage_line(pos_x, pos_y, sub_x, sub_y)
+ figure.add_trace(trace2)
+ trace3 = self._draw_storage_circle(pos_x, pos_y, storage_name, storage_text)
+ figure.add_trace(trace3)
+ trace4 = self._draw_storage_bus(sub_x, sub_y, dir_x, dir_y, storage_bus, storage_name)
+ figure.add_trace(trace4)
+
+ def update_storage(
+ self,
+ figure,
+ observation,
+ storage_name,
+ storage_id,
+ storage_bus,
+ storage_value,
+ storage_unit,
+ pos_x,
+ pos_y,
+ sub_x,
+ sub_y,
+ ):
+
+ storage_text = ""
+ if storage_value is not None:
+ storage_text = storage_name + "
"
+ storage_text += pltu.format_value_unit(storage_value, storage_unit)
+ figure.update_traces(text=storage_text, selector=dict(name=storage_name))
+ circle_name = self._storage_prefix + storage_name
+ if self.show_storage_txt:
+ figure.update_traces(text=storage_text, selector=dict(name=circle_name))
+ storage_marker = dict(color=self._line_bus_colors[storage_bus])
+ storage_select_name = self._storage_prefix + self._bus_prefix + storage_name
+ figure.update_traces(marker=storage_marker, selector=dict(name=storage_select_name))
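Note: with the storage traces added above, `PlotPlotly` gains the same text toggle that loads and generators already
had. An illustrative construction, assuming `env` is an environment with storage units:

    from grid2op.PlotGrid import PlotPlotly

    plot_helper = PlotPlotly(env.observation_space, show_storage_txt=True)
    fig = plot_helper.plot_obs(env.get_obs())
    # fig.show()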
diff --git a/grid2op/Reward/__init__.py b/grid2op/Reward/__init__.py
index 1c3567c79..54bacfb73 100644
--- a/grid2op/Reward/__init__.py
+++ b/grid2op/Reward/__init__.py
@@ -24,8 +24,8 @@
"AlertReward",
"_AlarmScore",
"_NewRenewableSourcesUsageScore",
- "_AssistantConfidenceScore",
- "_AssistantCostScore"
+ "_AlertCostScore",
+ "_AlertTrustScore"
]
from grid2op.Reward.constantReward import ConstantReward
@@ -52,7 +52,8 @@
from grid2op.Reward.l2rpn_wcci2022_scorefun import L2RPNWCCI2022ScoreFun
from grid2op.Reward.alertReward import AlertReward
from grid2op.Reward._newRenewableSourcesUsageScore import _NewRenewableSourcesUsageScore
-from grid2op.Reward._assistantScore import _AssistantConfidenceScore, _AssistantCostScore
+from grid2op.Reward._alertCostScore import _AlertCostScore
+from grid2op.Reward._alertTrustScore import _AlertTrustScore
import warnings
diff --git a/grid2op/Reward/_alarmScore.py b/grid2op/Reward/_alarmScore.py
index 4fa1f2e29..b4c69ff5d 100644
--- a/grid2op/Reward/_alarmScore.py
+++ b/grid2op/Reward/_alarmScore.py
@@ -65,14 +65,7 @@ def __init__(self, logger=None):
self.disc_lines_all_before_cascade = []
self.n_line = None
-
- # This class remembers the past state of the grid, this does not make sense for the "simulate" env
- # so i deactivate it in this case.
- from grid2op.Environment._ObsEnv import (
- _ObsEnv,
- ) # to avoid circular dependencies
-
- self._deactivate_reward_cls = (_ObsEnv,)
+ self._i_am_simulate = True
def initialize(self, env):
if not env._has_attention_budget:
@@ -88,6 +81,7 @@ def reset(self, env):
super().reset(env)
self.window_disconnection = max(self.best_time - self.window_size, 4)
self.disc_lines_all_before_cascade = []
+ self._i_am_simulate = self.is_simulated_env(env)
def _lines_disconnected_first(self, disc_lines_at_cascading_time):
"""
@@ -107,17 +101,17 @@ def _lines_disconnected_first(self, disc_lines_at_cascading_time):
self.disc_lines_all_before_cascade[step] >= 0
] = True
- if np.sum(disc_lines_to_consider_for_score) == 0:
+ if disc_lines_to_consider_for_score.sum() == 0:
disc_lines_to_consider_for_score = disc_lines_at_cascading_time == 0
# if we are there, it is because we have identified before that the failure is due to disconnected powerlines
- assert np.any(disc_lines_to_consider_for_score)
+ assert (disc_lines_to_consider_for_score).any()
# we transform the vector so that disconnected lines have a zero, to be coherent with env._disc_lines
return 1 - disc_lines_to_consider_for_score
def __call__(self, action, env, has_error, is_done, is_illegal, is_ambiguous):
- if self.is_simulated_env(env):
+ if self._i_am_simulate:
return self.reward_no_game_over
disc_lines_now = env._disc_lines
diff --git a/grid2op/Reward/_alertCostScore.py b/grid2op/Reward/_alertCostScore.py
new file mode 100644
index 000000000..5967bd229
--- /dev/null
+++ b/grid2op/Reward/_alertCostScore.py
@@ -0,0 +1,83 @@
+# Copyright (c) 2023, RTE (https://www.rte-france.com)
+# See AUTHORS.txt
+# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
+# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
+# you can obtain one at http://mozilla.org/MPL/2.0/.
+# SPDX-License-Identifier: MPL-2.0
+# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
+
+import numpy as np
+from grid2op.Reward.baseReward import BaseReward
+from grid2op.Reward._newRenewableSourcesUsageScore import _NewRenewableSourcesUsageScore
+from grid2op.dtypes import dt_float
+from grid2op.Exceptions import Grid2OpException
+import warnings
+
+#TODO
+# Test this class comprehensively if usage is revived.
+# It was originally intended for the L2RPN 2023 Competition but was eventually not selected.
+# Tests were set aside at some stage of these developments.
+class _AlertCostScore(BaseReward):
+ """
+
+ INTERNAL
+ .. danger:: This function is not used and not tested
+
+ .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
+ It **must not** serve as a reward. This score needs to be **MAXIMIZED**,
+ as it is negative! Also, this "reward" is not scaled in any way. Use it at your
+ own risk.
+
+ Implemented as a reward to make it easier to use in the context of the L2RPN competitions, this reward is based on the "alert feature"
+ where the agent is asked to send information about potential line overload issues on the grid after unpredictable powerline
+ disconnections (attacks of the opponent).
+ The alerts are assessed once per attack. In this scheme, this "reward" computes the assistant "cost score", which penalizes the number of alerts
+ the assistant has produced during an episode. It should not be used to train an agent.
+ For information, it will not be used for the L2RPN_IDF_2023 competition.
+ """
+ def __init__(self, logger=None):
+ BaseReward.__init__(self, logger=logger)
+ self.reward_min = dt_float(-1.0)
+ self.reward_max = dt_float(1.0)
+ self._is_simul_env = False
+ self.total_nb_alertes_possible = None
+ self.total_nb_alerts = None
+
+ warnings.warn("This class is not tested, use it with care")
+
+ def initialize(self, env):
+
+ if not env.dim_alerts > 0:
+ raise Grid2OpException(
+ 'Impossible to use the "_AlertCostScore" with an environment for which the Assistant feature '
+ 'is disabled. Please make sure "env.dim_alerts" is > 0 or '
+ "change the reward class with `grid2op.make(..., reward_class=AnyOtherReward)`"
+ )
+ self.reset(env)
+
+ def reset(self, env):
+ self._is_simul_env = self.is_simulated_env(env)
+ if self._is_simul_env:
+ return
+
+ #self.total_nb_alertes_possible = (env.chronics_handler.max_timestep() + 1) * (env.dim_alerts)
+ self.total_nb_alerts = 0
+
+ def __call__(self, action, env, has_error, is_done, is_illegal, is_ambiguous):
+ if self._is_simul_env:
+ return dt_float(0.)
+
+ if is_done:
+ self.total_nb_alertes_possible = env.nb_time_step * env.dim_alerts
+ ratio_nb_alerts = 100 * (1 - self.total_nb_alerts / self.total_nb_alertes_possible)
+ return self._penalization_fun(ratio_nb_alerts)
+ else:
+ self.total_nb_alerts = env._total_number_of_alert
+ return dt_float(0.)
+
+ @staticmethod
+ def _penalization_fun(x, center=80):
+ return _NewRenewableSourcesUsageScore._surlinear_func_curtailment(x=x, center=center)
+
+
+
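Note: given the `__call__` above, the end-of-episode score is simply the surlinear penalization of the share of alert
opportunities left unused. The arithmetic, isolated (values illustrative):

    from grid2op.Reward import _AlertCostScore  # private class, shown for illustration

    nb_time_step, dim_alerts, total_nb_alerts = 2016, 10, 500
    total_possible = nb_time_step * dim_alerts            # alert opportunities in the episode
    ratio = 100 * (1 - total_nb_alerts / total_possible)  # 100 means no alert was sent
    score = _AlertCostScore._penalization_fun(ratio)      # surlinear, centered at 80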
diff --git a/grid2op/Reward/_alertTrustScore.py b/grid2op/Reward/_alertTrustScore.py
new file mode 100644
index 000000000..525e5d5f9
--- /dev/null
+++ b/grid2op/Reward/_alertTrustScore.py
@@ -0,0 +1,164 @@
+# Copyright (c) 2023, RTE (https://www.rte-france.com)
+# See AUTHORS.txt
+# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
+# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
+# you can obtain one at http://mozilla.org/MPL/2.0/.
+# SPDX-License-Identifier: MPL-2.0
+# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
+
+import numpy as np
+from grid2op.Reward import AlertReward
+from grid2op.dtypes import dt_float
+
+class _AlertTrustScore(AlertReward):
+ """
+
+ INTERNAL
+
+    .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
+            It **must not** serve as a reward. This score needs to be **MAXIMIZED**,
+            as it is negative! Also, this "reward" is not scaled in any way. Use it at your
+            own risk.
+
+    Implemented as a reward to make it easier to use in the context of the L2RPN competitions, this reward is based on the "alert feature"
+    where the agent is asked to send information about a potential line overload issue on the grid after an unpredictable powerline
+    disconnection (attack of the opponent).
+    The alerts are assessed once per attack and cumulated over the episode. In this scheme, this "reward" computes the assistant "score",
+    which assesses how well the agent is aware of its capacity to deal with a situation during an episode.
+    It should not be used to train an agent.
+
+ """
+
+ def __init__(self,
+ logger=None,
+ reward_min_no_blackout=-1.0,
+ reward_min_blackout=-50,
+ reward_max_no_blackout=0.0,
+ reward_max_blackout=0.0,
+ reward_end_episode_bonus=0.0,
+ min_score=-3):
+
+ super().__init__(logger,
+ reward_min_no_blackout,
+ reward_min_blackout,
+ reward_max_no_blackout,
+ reward_max_blackout,
+ reward_end_episode_bonus)
+
+ self.min_score = dt_float(min_score)
+ self.max_score = dt_float(1.0)
+ self.blackout_encountered = False
+
+ def initialize(self, env):
+ self._is_simul_env = self.is_simulated_env(env)
+
+ self.cumulated_reward = 0
+        # KPIs
+        self.total_nb_attacks = 0
+        self.nb_last_attacks = 0  # attacks within the alert time window before the episode ends
+
+ #TODO
+ #self.total_nb_alerts = 0
+ #self.alert_attack_no_blackout = 0
+ #self.alert_attack_blackout = 0
+ #self.no_alert_attack_no_blackout = 0
+ #self.no_alert_attack_blackout = 0
+ #self.blackout_encountered = False
+ return super().initialize(env)
+
+ def reset(self, env):
+ super().reset(env)
+ self.blackout_encountered = False
+ self.cumulated_reward = 0
+        # KPIs
+ self.total_nb_attacks = 0
+ self.nb_last_attacks = 0
+
+ # TODO
+ #self.total_nb_alerts = 0
+ #self.alert_attack_no_blackout = 0
+ #self.alert_attack_blackout = 0
+ #self.no_alert_attack_no_blackout = 0
+ #self.no_alert_attack_blackout = 0
+ #self.blackout_encountered = False
+
+ def __call__(self, action, env, has_error, is_done, is_illegal, is_ambiguous):
+
+ score_ep = 0.
+ if self._is_simul_env:
+ return score_ep
+
+ self.blackout_encountered = self.is_in_blackout(has_error, is_done)
+
+ res = super().__call__(action, env, has_error, is_done, is_illegal, is_ambiguous)
+ self.cumulated_reward += res
+
+        is_attack = (env._time_since_last_attack == 0).any()  # even simultaneous attacks are counted as a single attack event
+ self.total_nb_attacks += is_attack
+
+ # TODO
+ #lines_alerted_beforeattack = np.equal(env._time_since_last_alert, env._time_since_last_attack + 1) and lines_attacked
+ #self.total_nb_alerts += np.sum(lines_alerted_beforeattack)
+
+ if is_done:
+ indexes_to_look = (np.arange(-self.time_window, 1) + self._current_id) % self._nrows_array # include current step (hence the np.arange(..., **1**))
+ ts_attack_in_order = self._ts_attack[indexes_to_look, :]
+ self.nb_last_attacks = ts_attack_in_order.any(axis=1).sum()
+
+ cm_reward_min_ep, cm_reward_max_ep = self._compute_min_max_reward(self.total_nb_attacks,
+ self.nb_last_attacks)
+ score_ep = self._normalisation_fun(self.cumulated_reward, cm_reward_min_ep, cm_reward_max_ep,
+ self.min_score, self.max_score, self.blackout_encountered)
+
+ # TODO
+ # if self.blackout_encountered:
+ # lines_attacked_dangerzone = (env._time_since_last_attack >= 0) * (env._time_since_last_attack < SURVIVOR_TIMESTEPS)
+ #
+ # self.alert_attack_blackout += 1. * any(lines_alerted_beforeattack[lines_attacked_dangerzone])
+ # self.no_alert_attack_blackout += 1. * any(~lines_alerted_beforeattack[lines_attacked_dangerzone])
+ # else :
+ # lines_attacked_no_blackout = env._time_since_last_attack > 0
+ #
+ # self.alert_attack_no_blackout += np.sum(lines_alerted_beforeattack[lines_attacked_no_blackout])
+ # self.no_alert_attack_no_blackout += np.sum(~lines_alerted_beforeattack[lines_attacked_no_blackout])
+
+ return score_ep
+
+ else:
+ # TODO
+ # lines_attacked_no_blackout = env._time_since_last_attack == SURVIVOR_TIMESTEPS
+ #
+ # self.alert_attack_no_blackout += np.sum(lines_alerted_beforeattack[lines_attacked_no_blackout])
+ # self.no_alert_attack_no_blackout += np.sum(~lines_alerted_beforeattack[lines_attacked_no_blackout])
+
+ return score_ep
+
+ @staticmethod
+    def _normalisation_fun(cm_reward, cm_reward_min_ep, cm_reward_max_ep, min_score, max_score, is_blackout):
+
+        # in case cm_reward_min_ep == cm_reward_max_ep == 0, the score is either the maximum or 0
+ if cm_reward_min_ep == cm_reward_max_ep:
+ if is_blackout:
+ score_ep = 0.0
+ else:
+ score_ep = max_score
+ else:
+            standardized_score = (cm_reward - cm_reward_min_ep) / (cm_reward_max_ep - cm_reward_min_ep)  # denominator cannot be 0 given the first if condition
+ score_ep = min_score + (max_score - min_score) * standardized_score
+ return score_ep
+
+    def _compute_min_max_reward(self, nb_attacks, nb_last_attacks):
+
+        if self.blackout_encountered:
+            if nb_last_attacks == 0:  # the blackout is not tied to a recent attack
+                cm_reward_min_ep = self.reward_min_no_blackout * nb_attacks
+                cm_reward_max_ep = self.reward_max_no_blackout * nb_attacks
+            elif nb_last_attacks >= 1:  # the blackout can be tied to one or several recent attacks
+ cm_reward_min_ep = self.reward_min_no_blackout * (nb_attacks - nb_last_attacks) + self.reward_min_blackout
+ cm_reward_max_ep = self.reward_max_no_blackout * (nb_attacks - nb_last_attacks) + self.reward_max_blackout
+ else:
+ cm_reward_min_ep = self.reward_min_no_blackout * nb_attacks
+ cm_reward_max_ep = self.reward_max_no_blackout * nb_attacks + self.reward_end_episode_bonus
+
+ return cm_reward_min_ep, cm_reward_max_ep
+
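A minimal sketch (editor's illustration, not part of the patch) of the min-max normalisation performed by `_AlertTrustScore._normalisation_fun`, with hypothetical bounds for an episode of 3 attacks survived without blackout::

    def normalisation_fun(cm_reward, cm_min, cm_max, min_score=-3.0, max_score=1.0, is_blackout=False):
        if cm_min == cm_max:
            # degenerate case: the score is either the maximum or 0
            return 0.0 if is_blackout else max_score
        standardized = (cm_reward - cm_min) / (cm_max - cm_min)
        return min_score + (max_score - min_score) * standardized

    # with reward_min_no_blackout=-1, reward_max_no_blackout=0, 3 attacks and no
    # end-of-episode bonus, the bounds are cm_min = -3 and cm_max = 0
    print(normalisation_fun(cm_reward=-1.0, cm_min=-3.0, cm_max=0.0))  # -3 + 4 * (2 / 3) = -0.33...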
diff --git a/grid2op/Reward/_assistantScore.py b/grid2op/Reward/_assistantScore.py
index 707c46299..30d1f6f13 100644
--- a/grid2op/Reward/_assistantScore.py
+++ b/grid2op/Reward/_assistantScore.py
@@ -28,14 +28,11 @@ class _AssistantConfidenceScore(BaseReward):
def __init__(self, logger=None):
BaseReward.__init__(self, logger=logger)
- def __initialize__(self, env):
+ def initialize(self, env):
self.reset(env)
- def reset(self):
- pass
-
def __call__(self, env, obs, is_done):
- pass
+ return 0.
class _AssistantCostScore(BaseReward):
"""
@@ -55,11 +52,8 @@ class _AssistantCostScore(BaseReward):
def __init__(self, logger=None):
BaseReward.__init__(self, logger=logger)
- def __initialize__(self, env):
+ def initialize(self, env):
self.reset(env)
- def reset(self):
- pass
-
def __call__(self, env, obs, is_done):
- pass
\ No newline at end of file
+ return 0.
diff --git a/grid2op/Reward/_newRenewableSourcesUsageScore.py b/grid2op/Reward/_newRenewableSourcesUsageScore.py
index d55d7ef71..5570f912a 100644
--- a/grid2op/Reward/_newRenewableSourcesUsageScore.py
+++ b/grid2op/Reward/_newRenewableSourcesUsageScore.py
@@ -38,12 +38,13 @@ def initialize(self, env):
self.reset(env)
def reset(self, env):
- self._is_simul_env = is_simulated_env(env)
+ self._is_simul_env = self.is_simulated_env(env)
if self._is_simul_env:
return
- self.gen_res_p_curtailed_list = np.zeros(env.chronics_handler.max_timestep() + 1)
- self.gen_res_p_before_curtail_list = np.zeros(env.chronics_handler.max_timestep() + 1)
+ max_timesteps = env.chronics_handler.max_timestep() + 1
+ self.gen_res_p_curtailed_list = np.zeros(max_timesteps)
+ self.gen_res_p_before_curtail_list = np.zeros(max_timesteps)
def __call__(self, action, env, has_error, is_done, is_illegal, is_ambiguous):
@@ -56,15 +57,20 @@ def __call__(self, action, env, has_error, is_done, is_illegal, is_ambiguous):
self.gen_res_p_before_curtail_list[env.nb_time_step] = gen_nres_p_before_curtail
return dt_float(0.)
else:
- ratio_nres_usage = 100 * np.sum(self.gen_res_p_curtailed_list[1:]) / np.sum(self.gen_res_p_before_curtail_list[1:])
+ total_sum = self.gen_res_p_before_curtail_list[1:].sum()
+ if abs(total_sum) <= 1e-6:
+                # no nres in the scenario: the agent cannot possibly curtail anything,
+                # so it uses all the available renewable energy
+ return self._surlinear_func_curtailment(100.)
+ ratio_nres_usage = 100 * self.gen_res_p_curtailed_list[1:].sum() / total_sum
return self._surlinear_func_curtailment(ratio_nres_usage)
@staticmethod
def _get_total_nres_usage(env):
nres_mask = env.gen_renewable
gen_p, *_ = env.backend.generators_info()
- gen_nres_p_before_curtail = np.sum(env._gen_before_curtailment[nres_mask])
- gen_nres_p_effective = np.sum(gen_p[nres_mask])
+ gen_nres_p_before_curtail = env._gen_before_curtailment[nres_mask].sum()
+ gen_nres_p_effective = gen_p[nres_mask].sum()
return gen_nres_p_effective, gen_nres_p_before_curtail
@@ -74,17 +80,5 @@ def _surlinear_func_curtailment(x, center=80, eps=1e-6):
f_surlinear = lambda x: x * np.log(x)
f_centralized = lambda x : f_surlinear(x) - f_surlinear(center)
f_standardizer= lambda x : np.ones_like(x) * f_centralized(100) * (x >= center) - np.ones_like(x) * f_centralized(50) * (x < center)
-
return f_centralized(x) / f_standardizer(x)
-
-
-#to wait before PR Laure
-def is_simulated_env(env):
-
- # to prevent cyclical import
- from grid2op.Environment._ObsEnv import _ObsEnv
- from grid2op.Environment._forecast_env import _ForecastEnv
-
- # This reward is not compatible with simulations
- return isinstance(env, (_ObsEnv, _ForecastEnv))
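For reference, a minimal sketch (editor's illustration, not part of the patch) of the surlinear scoring helper kept in this file, evaluated at a few hypothetical ratios::

    import numpy as np

    def surlinear_func_curtailment(x, center=80):
        # mirrors the lambdas of _surlinear_func_curtailment shown above
        f_surlinear = lambda v: v * np.log(v)
        f_centralized = lambda v: f_surlinear(v) - f_surlinear(center)
        f_standardizer = lambda v: (np.ones_like(v) * f_centralized(100.) * (v >= center)
                                    - np.ones_like(v) * f_centralized(50.) * (v < center))
        return f_centralized(x) / f_standardizer(x)

    ratios = np.array([50., 80., 100.])
    print(surlinear_func_curtailment(ratios))  # approx. [-1., 0., 1.]: 0 at the center, -1 and 1 at the ends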
diff --git a/grid2op/Reward/alarmReward.py b/grid2op/Reward/alarmReward.py
index 94e9a1eb3..d9d2cf87a 100644
--- a/grid2op/Reward/alarmReward.py
+++ b/grid2op/Reward/alarmReward.py
@@ -110,7 +110,7 @@ def _mult_for_zone(self, alarm, disc_lines, env):
lines_disconnected_first = np.where(disc_lines == 0)[0]
if (
- np.sum(alarm) > 1
+ alarm.sum() > 1
): # if we have more than one zone in the alarm, we cannot discrtiminate, no bonus points
return res
@@ -127,7 +127,7 @@ def _mult_for_zone(self, alarm, disc_lines, env):
list_zone_ids = np.where(np.isin(env.alarms_area_names, list_zone_names))[0]
# and finally, award some extra points if one of the zone, containing one of the powerline disconnected
# by protection is in the alarm
- if np.any(alarm[list_zone_ids]):
+ if alarm[list_zone_ids].any():
res *= self.mult_for_right_zone
return res
diff --git a/grid2op/Reward/alertReward.py b/grid2op/Reward/alertReward.py
index 1f3abb29e..3f7d90924 100644
--- a/grid2op/Reward/alertReward.py
+++ b/grid2op/Reward/alertReward.py
@@ -8,6 +8,7 @@
import numpy as np
+from typing import Optional
from grid2op.Reward.baseReward import BaseReward
from grid2op.dtypes import dt_float, dt_bool, dt_int
@@ -80,34 +81,31 @@ def __init__(self,
reward_end_episode_bonus=1.0):
BaseReward.__init__(self, logger=logger)
- self.reward_min_no_blackout = dt_float(reward_min_no_blackout)
- self.reward_min_blackout = dt_float(reward_min_blackout)
- self.reward_max_no_blackout = dt_float(reward_max_no_blackout)
- self.reward_max_blackout = dt_float(reward_max_blackout)
- self.reward_end_episode_bonus = dt_float(reward_end_episode_bonus)
- self.reward_no_game_over = dt_float(0.0)
+ self.reward_min_no_blackout : float = dt_float(reward_min_no_blackout)
+ self.reward_min_blackout : float = dt_float(reward_min_blackout)
+ self.reward_max_no_blackout : float = dt_float(reward_max_no_blackout)
+ self.reward_max_blackout : float = dt_float(reward_max_blackout)
+ self.reward_end_episode_bonus : float = dt_float(reward_end_episode_bonus)
+ self.reward_no_game_over : float = dt_float(0.0)
- self._reward_range_blackout = (self.reward_max_blackout - self.reward_min_blackout)
+ self._reward_range_blackout : float = (self.reward_max_blackout - self.reward_min_blackout)
- self.total_time_steps = dt_int(0.0)
- self.time_window = None
+ self.total_time_steps : Optional[int] = dt_int(0)
+ self.time_window : Optional[int] = None
- self._ts_attack : np.ndarray = None
+ self._ts_attack : Optional[np.ndarray] = None
self._current_id : int = 0
- self._lines_currently_attacked : np.ndarray = None
- self._alert_launched : np.ndarray = None
- self._nrows_array : int = None
+ self._lines_currently_attacked : Optional[np.ndarray] = None
+ self._alert_launched : Optional[np.ndarray] = None
+ self._nrows_array : Optional[int] = None
self._i_am_simulate : bool = False
+
def initialize(self, env: "grid2op.Environment.BaseEnv"):
self.total_time_steps = env.max_episode_duration()
self.time_window = env.parameters.ALERT_TIME_WINDOW
self._nrows_array = self.time_window + 2
-
- # TODO simulate env stuff !
-
- # TODO vectors proper size
self._ts_attack = np.full((self._nrows_array, type(env).dim_alerts), False, dtype=dt_bool)
self._alert_launched = np.full((self._nrows_array, type(env).dim_alerts), False, dtype=dt_bool)
self._current_id = 0
@@ -115,6 +113,14 @@ def initialize(self, env: "grid2op.Environment.BaseEnv"):
self._i_am_simulate = self.is_simulated_env(env)
return super().initialize(env)
+
+ def reset(self, env):
+        self._ts_attack[:, :] = False
+        self._alert_launched[:, :] = False
+ self._current_id = 0
+ self._lines_currently_attacked[:] = False
+ self._i_am_simulate = self.is_simulated_env(env)
+ return super().reset(env)
def _update_attack(self, env):
if env.infos["opponent_attack_line"] is None:
@@ -181,7 +187,7 @@ def __call__(self, action, env, has_error, is_done, is_illegal, is_ambiguous):
# if there is no attack, I do nothing
indexes_to_look = (np.arange(-self.time_window, 1) + self._current_id) % self._nrows_array # include current step (hence the np.arange(..., **1**))
ts_attack_in_order = self._ts_attack[indexes_to_look, :]
- has_attack = np.any(ts_attack_in_order)
+ has_attack = (ts_attack_in_order).any()
if has_attack:
# I need to check the alarm for the attacked lines
res = self._compute_score_attack_blackout(env, ts_attack_in_order, indexes_to_look)
@@ -189,7 +195,7 @@ def __call__(self, action, env, has_error, is_done, is_illegal, is_ambiguous):
# no blackout: i check the first step in the window before me to see if there is an attack,
index_window = (self._current_id - self.time_window) % self._nrows_array
lines_attack = self._ts_attack[index_window, :]
- if np.any(lines_attack):
+ if lines_attack.any():
# prev_ind = (index_window - 1) % self._nrows_array
# I don't need the "-1" because the action is already BEFORE the observation in the reward.
prev_ind = index_window
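A minimal sketch (editor's illustration, not part of the patch) of the circular-buffer indexing used above to look back over the alert window; the sizes are hypothetical::

    import numpy as np

    time_window, nrows_array, current_id = 3, 5, 1  # nrows_array = time_window + 2
    indexes_to_look = (np.arange(-time_window, 1) + current_id) % nrows_array
    print(indexes_to_look)  # [3 4 0 1]: the 3 previous rows plus the current one, wrapping around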
diff --git a/grid2op/Reward/baseReward.py b/grid2op/Reward/baseReward.py
index 320bd5d07..164f54e0b 100644
--- a/grid2op/Reward/baseReward.py
+++ b/grid2op/Reward/baseReward.py
@@ -120,7 +120,7 @@ def __init__(self, logger: logging.Logger=None):
def is_simulated_env(self, env):
# to prevent cyclical import
- from grid2op.Environment._ObsEnv import _ObsEnv
+ from grid2op.Environment._obsEnv import _ObsEnv
from grid2op.Environment._forecast_env import _ForecastEnv
return isinstance(env, (_ObsEnv, _ForecastEnv))
diff --git a/grid2op/Reward/distanceReward.py b/grid2op/Reward/distanceReward.py
index 1979f415f..310421a7c 100644
--- a/grid2op/Reward/distanceReward.py
+++ b/grid2op/Reward/distanceReward.py
@@ -45,7 +45,7 @@ def __call__(self, action, env, has_error, is_done, is_illegal, is_ambiguous):
return self.reward_min
# Get topo from env
- obs = env.get_obs()
+ obs = env.get_obs(_do_copy=False)
topo = obs.topo_vect
idx = 0
diff --git a/grid2op/Reward/economicReward.py b/grid2op/Reward/economicReward.py
index a1f219a2f..ff46d994b 100644
--- a/grid2op/Reward/economicReward.py
+++ b/grid2op/Reward/economicReward.py
@@ -52,14 +52,14 @@ def initialize(self, env):
"Impossible to use the EconomicReward reward with an environment without generators"
"cost. Please make sure env.redispatching_unit_commitment_availble is available."
)
- self.worst_cost = dt_float(np.sum(env.gen_cost_per_MW * env.gen_pmax) * env.delta_time_seconds / 3600.0)
+ self.worst_cost = dt_float((env.gen_cost_per_MW * env.gen_pmax).sum() * env.delta_time_seconds / 3600.0)
def __call__(self, action, env, has_error, is_done, is_illegal, is_ambiguous):
if has_error or is_illegal or is_ambiguous:
res = self.reward_min
else:
# compute the cost of the grid
- res = dt_float(np.sum(env.get_obs().prod_p * env.gen_cost_per_MW) * env.delta_time_seconds / 3600.0)
+ res = dt_float((env.get_obs(_do_copy=False).prod_p * env.gen_cost_per_MW).sum() * env.delta_time_seconds / 3600.0)
# we want to minimize the cost by maximizing the reward so let's take the opposite
res *= dt_float(-1.0)
# to be sure it's positive, add the highest possible cost
diff --git a/grid2op/Reward/l2RPNReward.py b/grid2op/Reward/l2RPNReward.py
index 674e7ae59..c15e5ade7 100644
--- a/grid2op/Reward/l2RPNReward.py
+++ b/grid2op/Reward/l2RPNReward.py
@@ -55,7 +55,7 @@ def initialize(self, env):
def __call__(self, action, env, has_error, is_done, is_illegal, is_ambiguous):
if not is_done and not has_error:
line_cap = self.__get_lines_capacity_usage(env)
- res = np.sum(line_cap)
+ res = line_cap.sum()
else:
# no more data to consider, no powerflow has been run, reward is what it is
res = self.reward_min
diff --git a/grid2op/Reward/l2RPNSandBoxScore.py b/grid2op/Reward/l2RPNSandBoxScore.py
index 18b7a7d26..e94b2f2dc 100644
--- a/grid2op/Reward/l2RPNSandBoxScore.py
+++ b/grid2op/Reward/l2RPNSandBoxScore.py
@@ -56,7 +56,7 @@ def _get_gen_p(self, env):
return gen_p
def _get_losses(self, env, gen_p, load_p):
- return (np.sum(gen_p, dtype=dt_float) - np.sum(load_p, dtype=dt_float)) * env.delta_time_seconds / 3600.0
+ return (gen_p.sum(dtype=dt_float) - load_p.sum(dtype=dt_float)) * env.delta_time_seconds / 3600.0
def _get_marginal_cost(self, env):
gen_activeprod_t = env._gen_activeprod_t
@@ -67,7 +67,7 @@ def _get_marginal_cost(self, env):
def _get_redisp_cost(self, env, p_t):
actual_dispatch = env._actual_dispatch
c_redispatching = (
- np.sum(np.abs(actual_dispatch)) * p_t * env.delta_time_seconds / 3600.0
+ np.abs(actual_dispatch).sum() * p_t * env.delta_time_seconds / 3600.0
)
return c_redispatching
@@ -86,7 +86,7 @@ def _get_loss_cost(self, env, p_t):
return c_loss
def _get_storage_cost(self, env, p_t):
- c_storage = np.sum(np.abs(env._storage_power)) * p_t * env.delta_time_seconds / 3600.0
+ c_storage = np.abs(env._storage_power).sum() * p_t * env.delta_time_seconds / 3600.0
return c_storage
def __call__(self, action, env, has_error, is_done, is_illegal, is_ambiguous):
diff --git a/grid2op/Reward/l2rpn_wcci2022_scorefun.py b/grid2op/Reward/l2rpn_wcci2022_scorefun.py
index 0e30db41c..bb1f51406 100644
--- a/grid2op/Reward/l2rpn_wcci2022_scorefun.py
+++ b/grid2op/Reward/l2rpn_wcci2022_scorefun.py
@@ -39,5 +39,5 @@ def __init__(self,
def _get_storage_cost(self, env, p_t):
"""storage cost is a flat 10 € / MWh instead of depending on the marginal cost"""
- c_storage = np.sum(np.abs(env._storage_power)) * self.storage_cost * env.delta_time_seconds / 3600.0
+ c_storage = np.abs(env._storage_power).sum() * self.storage_cost * env.delta_time_seconds / 3600.0
return c_storage
diff --git a/grid2op/Reward/linesCapacityReward.py b/grid2op/Reward/linesCapacityReward.py
index 50d1ce843..e251f3870 100644
--- a/grid2op/Reward/linesCapacityReward.py
+++ b/grid2op/Reward/linesCapacityReward.py
@@ -45,16 +45,13 @@ def __init__(self, logger=None):
self.reward_min = dt_float(0.0)
self.reward_max = dt_float(1.0)
- def initialize(self, env):
- pass
-
def __call__(self, action, env, has_error, is_done, is_illegal, is_ambiguous):
if has_error or is_illegal or is_ambiguous:
return self.reward_min
- obs = env.get_obs()
- n_connected = np.sum(obs.line_status.astype(dt_float))
- usage = np.sum(obs.rho[obs.line_status == True])
+ obs = env.get_obs(_do_copy=False)
+ n_connected = dt_float(obs.line_status.sum())
+ usage = obs.rho[obs.line_status == True].sum()
usage = np.clip(usage, 0.0, float(n_connected))
reward = np.interp(
n_connected - usage,
diff --git a/grid2op/Reward/linesReconnectedReward.py b/grid2op/Reward/linesReconnectedReward.py
index ce27f26aa..3715961e1 100644
--- a/grid2op/Reward/linesReconnectedReward.py
+++ b/grid2op/Reward/linesReconnectedReward.py
@@ -48,7 +48,7 @@ def __call__(self, action, env, has_error, is_done, is_illegal, is_ambiguous):
return self.reward_min
# Get obs from env
- obs = env.get_obs()
+ obs = env.get_obs(_do_copy=False)
# All lines ids
lines_id = np.arange(env.n_line)
diff --git a/grid2op/Reward/n1Reward.py b/grid2op/Reward/n1Reward.py
index b31a9904e..9d11561ef 100644
--- a/grid2op/Reward/n1Reward.py
+++ b/grid2op/Reward/n1Reward.py
@@ -8,7 +8,7 @@
import copy
from grid2op.Reward import BaseReward
-from grid2op.Action._BackendAction import _BackendAction
+from grid2op.Action._backendAction import _BackendAction
class N1Reward(BaseReward):
diff --git a/grid2op/Reward/redispReward.py b/grid2op/Reward/redispReward.py
index 564340a75..a51ec00ec 100644
--- a/grid2op/Reward/redispReward.py
+++ b/grid2op/Reward/redispReward.py
@@ -111,7 +111,7 @@ def generate_class_custom_params(
# on linux it's fine, i can create new classes for each meta parameters
nm_res = f"RedispReward_{alpha_redisph:.2f}_{min_load_ratio:.2f}_{worst_losses_ratio:.2f}"
nm_res += f"_{min_reward:.2f}_{least_losses_ratio:.2f}_{reward_illegal_ambiguous:.2f}"
- nm_res = re.sub("\\.", "@", nm_res)
+ nm_res = nm_res.replace(".", "@")
cls_attr_as_dict = {
"_alpha_redisp": dt_float(alpha_redisph),
"_min_load_ratio": dt_float(min_load_ratio),
@@ -148,10 +148,10 @@ def initialize(self, env):
cls_ = type(self)
worst_marginal_cost = np.max(env.gen_cost_per_MW)
- worst_load = dt_float(np.sum(env.gen_pmax))
+ worst_load = env.gen_pmax.sum(dtype=dt_float)
# it's not the worst, but definitely an upper bound
worst_losses = dt_float(cls_._worst_losses_ratio) * worst_load
- worst_redisp = cls_._alpha_redisp * np.sum(env.gen_pmax) # not realistic, but an upper bound
+ worst_redisp = cls_._alpha_redisp * env.gen_pmax.sum() # not realistic, but an upper bound
self.max_regret = (worst_losses + worst_redisp) * worst_marginal_cost * env.delta_time_seconds / 3600.0
self.reward_min = dt_float(cls_._min_reward)
@@ -181,7 +181,7 @@ def __call__(self, action, env, has_error, is_done, is_illegal, is_ambiguous):
gen_p, *_ = env.backend.generators_info()
load_p, *_ = env.backend.loads_info()
# don't forget to convert MW to MWh !
- losses = (np.sum(gen_p) - np.sum(load_p)) * env.delta_time_seconds / 3600.0
+ losses = (gen_p.sum() - load_p.sum()) * env.delta_time_seconds / 3600.0
# compute the marginal cost
gen_activeprod_t = env._gen_activeprod_t
@@ -190,14 +190,14 @@ def __call__(self, action, env, has_error, is_done, is_illegal, is_ambiguous):
# redispatching amount
actual_dispatch = env._actual_dispatch
redisp_cost = (
- self._alpha_redisp * np.sum(np.abs(actual_dispatch)) * marginal_cost * env.delta_time_seconds / 3600.0
+ self._alpha_redisp * np.abs(actual_dispatch).sum() * marginal_cost * env.delta_time_seconds / 3600.0
)
# cost of losses
losses_cost = losses * marginal_cost
# cost of storage
- c_storage = np.sum(np.abs(env._storage_power)) * marginal_cost * env.delta_time_seconds / 3600.0
+ c_storage = np.abs(env._storage_power).sum() * marginal_cost * env.delta_time_seconds / 3600.0
# total "regret"
regret = losses_cost + redisp_cost + c_storage
@@ -206,6 +206,6 @@ def __call__(self, action, env, has_error, is_done, is_illegal, is_ambiguous):
reward = self.max_regret - regret
# divide it by load, to be less sensitive to load variation
- res = dt_float(reward / np.sum(load_p))
+ res = dt_float(reward / load_p.sum())
return res
diff --git a/grid2op/Rules/LookParam.py b/grid2op/Rules/LookParam.py
index fd4f91cce..13445e612 100644
--- a/grid2op/Rules/LookParam.py
+++ b/grid2op/Rules/LookParam.py
@@ -34,13 +34,13 @@ def __call__(self, action, env):
powerline_status = env.get_current_line_status()
aff_lines, aff_subs = action.get_topological_impact(powerline_status)
- if np.sum(aff_lines) > env._parameters.MAX_LINE_STATUS_CHANGED:
+ if aff_lines.sum() > env._parameters.MAX_LINE_STATUS_CHANGED:
ids = np.where(aff_lines)[0]
return False, IllegalAction(
"More than {} line status affected by the action: {}"
"".format(env.parameters.MAX_LINE_STATUS_CHANGED, ids)
)
- if np.sum(aff_subs) > env._parameters.MAX_SUB_CHANGED:
+ if aff_subs.sum() > env._parameters.MAX_SUB_CHANGED:
ids = np.where(aff_subs)[0]
return False, IllegalAction(
"More than {} substation affected by the action: {}"
diff --git a/grid2op/Rules/PreventDiscoStorageModif.py b/grid2op/Rules/PreventDiscoStorageModif.py
index 3f610659d..ba52472f1 100644
--- a/grid2op/Rules/PreventDiscoStorageModif.py
+++ b/grid2op/Rules/PreventDiscoStorageModif.py
@@ -37,7 +37,7 @@ def __call__(self, action, env):
)
not_set_status = storage_set_bus[storage_disco] <= 0
not_change_status = ~storage_change_bus[storage_disco]
- if np.any(power_modif_disco & not_set_status & not_change_status):
+ if (power_modif_disco & not_set_status & not_change_status).any():
tmp_ = power_modif_disco & not_set_status & not_change_status
return False, IllegalAction(
f"Attempt to modify the power produced / absorbed by a storage unit "
diff --git a/grid2op/Rules/PreventReconnection.py b/grid2op/Rules/PreventReconnection.py
index 439ec42cc..464c3653e 100644
--- a/grid2op/Rules/PreventReconnection.py
+++ b/grid2op/Rules/PreventReconnection.py
@@ -35,7 +35,7 @@ def __call__(self, action, env):
powerline_status = env.get_current_line_status()
aff_lines, aff_subs = action.get_topological_impact(powerline_status)
- if np.any(env._times_before_line_status_actionable[aff_lines] > 0):
+ if (env._times_before_line_status_actionable[aff_lines] > 0).any():
# i tried to act on a powerline too shortly after a previous action
# or shut down due to an overflow or opponent or hazards or maintenance
ids = np.where((env._times_before_line_status_actionable > 0) & aff_lines)[
@@ -47,7 +47,7 @@ def __call__(self, action, env):
)
)
- if np.any(env._times_before_topology_actionable[aff_subs] > 0):
+ if (env._times_before_topology_actionable[aff_subs] > 0).any():
# I tried to act on a topology too shortly after a previous action
ids = np.where((env._times_before_topology_actionable > 0) & aff_subs)[0]
return False, IllegalAction(
diff --git a/grid2op/Rules/rulesByArea.py b/grid2op/Rules/rulesByArea.py
index 5995f62a2..8335a1697 100644
--- a/grid2op/Rules/rulesByArea.py
+++ b/grid2op/Rules/rulesByArea.py
@@ -8,6 +8,9 @@
import numpy as np
from itertools import chain
+import warnings
+import copy
+
from grid2op.Rules.BaseRules import BaseRules
from grid2op.Rules.LookParam import LookParam
from grid2op.Rules.PreventReconnection import PreventReconnection
@@ -16,6 +19,7 @@
IllegalAction, Grid2OpException
)
+
class RulesByArea(BaseRules):
"""
This subclass combine :class:`PreventReconnection`, :class: `PreventDiscoStorageModif` to be applied on the whole grid at once,
@@ -51,12 +55,25 @@ def __init__(self, areas_list):
----------
areas_list : list of areas, each placeholder containing the ids of substations of each defined area
"""
- self.substations_id_by_area = {i : sorted(k) for i,k in enumerate(areas_list)}
-
+ if isinstance(areas_list, list):
+ self.substations_id_by_area = {i : sorted(k) for i, k in enumerate(areas_list)}
+ elif isinstance(areas_list, dict):
+ self.substations_id_by_area = {i : copy.deepcopy(k) for i, k in areas_list.items()}
+ else:
+ raise Grid2OpException("Impossible to create a rules when area_list is neither a list nor a dict")
+ needs_cleaning = False
+ for area_nm, area_subs in self.substations_id_by_area.items():
+ if not np.array_equal(np.unique(area_subs), area_subs):
+ warnings.warn(f"There are duplicate substation for area {area_nm}")
+ needs_cleaning = True
+ if needs_cleaning:
+ self.substations_id_by_area = {i : np.unique(k) for i, k in self.substations_id_by_area.items()}
+
def initialize(self, env):
"""
- This function is used to inform the class instance about the environment specification and check no substation of the grid are left ouside an area.
+        This function is used to inform the class instance about the environment
+        specification and to check that no substation of the grid is left outside an area.
Parameters
----------
env: :class:`grid2op.Environment.Environment`
@@ -66,7 +83,8 @@ def initialize(self, env):
n_sub = env.n_sub
n_sub_rule = np.sum([len(set(list_ids)) for list_ids in self.substations_id_by_area.values()])
if n_sub_rule != n_sub:
- raise Grid2OpException("The number of listed ids of substations in rule initialization does not match the number of substations of the chosen environement. Look for missing ids or doublon")
+ raise Grid2OpException("The number of listed ids of substations in rule initialization does not match the number of "
+ "substations of the chosen environement. Look for missing ids or doublon")
else:
self.lines_id_by_area = {key : sorted(list(chain(*[[item for item in np.where(env.line_or_to_subid == subid)[0]
] for subid in subid_list]))) for key,subid_list in self.substations_id_by_area.items()}
@@ -100,13 +118,13 @@ def _lookparam_byarea(self, action, env):
powerline_status = env.get_current_line_status()
aff_lines, aff_subs = action.get_topological_impact(powerline_status)
- if any([np.sum(aff_lines[line_ids]) > env._parameters.MAX_LINE_STATUS_CHANGED for line_ids in self.lines_id_by_area.values()]):
+ if any([(aff_lines[line_ids]).sum() > env._parameters.MAX_LINE_STATUS_CHANGED for line_ids in self.lines_id_by_area.values()]):
ids = [[k for k in np.where(aff_lines)[0] if k in line_ids] for line_ids in self.lines_id_by_area.values()]
return False, IllegalAction(
"More than {} line status affected by the action in one area: {}"
"".format(env.parameters.MAX_LINE_STATUS_CHANGED, ids)
)
- if any([np.sum(aff_subs[sub_ids]) > env._parameters.MAX_SUB_CHANGED for sub_ids in self.substations_id_by_area.values()]):
+ if any([(aff_subs[sub_ids]).sum() > env._parameters.MAX_SUB_CHANGED for sub_ids in self.substations_id_by_area.values()]):
ids = [[k for k in np.where(aff_subs)[0] if k in sub_ids] for sub_ids in self.substations_id_by_area.values()]
return False, IllegalAction(
"More than {} substation affected by the action in one area: {}"
diff --git a/grid2op/Space/GridObjects.py b/grid2op/Space/GridObjects.py
index c4189b987..592528a1e 100644
--- a/grid2op/Space/GridObjects.py
+++ b/grid2op/Space/GridObjects.py
@@ -439,7 +439,28 @@ class GridObjects:
alarms_lines_area = {} # for each lines of the grid, gives on which area(s) it is # TODO
alarms_area_lines = [] # for each area in the grid, gives which powerlines it contains # TODO
- # TODO specify the unit of redispatching data MWh, $/MW etc.
+    dim_alerts: `int`
+        The dimension of the "alert space" (the number of powerlines on which the agent can send an alert)
+
+ .. seealso:: :ref:`grid2op-alert-module` section of the doc for more information
+
+ .. versionadded:: 1.9.1
+
+    alertable_line_names: `np.ndarray`
+        Name (in order) of each powerline on which the agent can send an alert. Its size corresponds to :attr:`GridObjects.dim_alerts`
+        and it contains the names of these powerlines (str).
+
+ .. seealso:: :ref:`grid2op-alert-module` section of the doc for more information
+
+ .. versionadded:: 1.9.1
+
+    alertable_line_ids: `np.ndarray`
+        Id (in order) of each powerline on which the agent can send an alert. Its size corresponds to :attr:`GridObjects.dim_alerts`
+        and it contains the ids of these powerlines (int).
+
+ .. seealso:: :ref:`grid2op-alert-module` section of the doc for more information
+
+ .. versionadded:: 1.9.1
"""
BEFORE_COMPAT_VERSION = "neurips_2020_compat"
@@ -591,6 +612,7 @@ class GridObjects:
alertable_line_ids = []
def __init__(self):
+ """nothing to do when an object of this class is created, the information is held by the class attributes"""
pass
@classmethod
@@ -1123,8 +1145,8 @@ def from_vect(self, vect, check_legit=True):
# if np.any(~np.isfinite(tmp)) and default_nan:
# raise NonFiniteElement("None finite number in from_vect detected")
- if attr_nm not in type(self).attr_nan_list_set and np.any(
- ~np.isfinite(tmp)
+ if attr_nm not in type(self).attr_nan_list_set and (
+ (~np.isfinite(tmp)).any()
):
raise NonFiniteElement("None finite number in from_vect detected")
@@ -1190,7 +1212,7 @@ def size(self):
print("The size of the action space is {}".format(env.action_space.size()))
"""
- res = np.sum(self.shape()).astype(dt_int)
+ res = self.shape().sum(dtype=dt_int)
return res
@classmethod
@@ -1213,7 +1235,7 @@ def _aux_pos_big_topo(cls, vect_to_subid, vect_to_sub_pos):
"""
res = np.zeros(shape=vect_to_subid.shape, dtype=dt_int)
for i, (sub_id, my_pos) in enumerate(zip(vect_to_subid, vect_to_sub_pos)):
- obj_before = np.sum(cls.sub_info[:sub_id])
+ obj_before = cls.sub_info[:sub_id].sum()
res[i] = obj_before + my_pos
return res
@@ -1869,7 +1891,7 @@ def assert_grid_correct_cls(cls):
)
)
try:
- if np.any(~np.isfinite(tmp)):
+ if (~np.isfinite(tmp)).any():
raise EnvError(
"The grid could not be loaded properly."
"One of the vector is made of non finite elements, check the sub_info, *_to_subid, "
@@ -1890,7 +1912,7 @@ def assert_grid_correct_cls(cls):
"and self.n_sub ({})".format(len(cls.sub_info), cls.n_sub)
)
if (
- np.sum(cls.sub_info)
+ cls.sub_info.sum()
!= cls.n_load + cls.n_gen + 2 * cls.n_line + cls.n_storage
):
err_msg = "The number of elements of elements is not consistent between self.sub_info where there are "
@@ -1899,7 +1921,7 @@ def assert_grid_correct_cls(cls):
"the _grid ({})."
)
err_msg = err_msg.format(
- np.sum(cls.sub_info),
+ cls.sub_info.sum(),
cls.n_load + cls.n_gen + 2 * cls.n_line + cls.n_storage,
)
raise IncorrectNumberOfElements(err_msg)
@@ -2003,7 +2025,7 @@ def assert_grid_correct_cls(cls):
cls.storage_pos_topo_vect.flatten(),
)
)
- if len(np.unique(concat_topo)) != np.sum(cls.sub_info):
+        if len(np.unique(concat_topo)) != cls.sub_info.sum():
raise EnvError(
"2 different objects would have the same id in the topology vector, or there would be"
"an empty component in this vector."
@@ -2048,7 +2070,7 @@ def assert_grid_correct_cls(cls):
)
# no empty bus: at least one element should be present on each bus
- if np.any(cls.sub_info < 1):
+ if (cls.sub_info < 1).any():
if not grid2op.Space.space_utils._WARNING_ISSUED_FOR_SUB_NO_ELEM:
warnings.warn(
f"There are {np.sum(cls.sub_info < 1)} substations where no 'controlable' elements "
@@ -2243,76 +2265,76 @@ def _check_validity_storage_data(cls):
"self.storage_charging_efficiency.shape[0] != self.n_storage"
)
- if np.any(~np.isfinite(cls.storage_Emax)):
+ if (~np.isfinite(cls.storage_Emax)).any():
raise BackendError("np.any(~np.isfinite(self.storage_Emax))")
- if np.any(~np.isfinite(cls.storage_Emin)):
+ if (~np.isfinite(cls.storage_Emin)).any():
raise BackendError("np.any(~np.isfinite(self.storage_Emin))")
- if np.any(~np.isfinite(cls.storage_max_p_prod)):
+ if (~np.isfinite(cls.storage_max_p_prod)).any():
raise BackendError("np.any(~np.isfinite(self.storage_max_p_prod))")
- if np.any(~np.isfinite(cls.storage_max_p_absorb)):
+ if (~np.isfinite(cls.storage_max_p_absorb)).any():
raise BackendError("np.any(~np.isfinite(self.storage_max_p_absorb))")
- if np.any(~np.isfinite(cls.storage_marginal_cost)):
+ if (~np.isfinite(cls.storage_marginal_cost)).any():
raise BackendError("np.any(~np.isfinite(self.storage_marginal_cost))")
- if np.any(~np.isfinite(cls.storage_loss)):
+ if (~np.isfinite(cls.storage_loss)).any():
raise BackendError("np.any(~np.isfinite(self.storage_loss))")
- if np.any(~np.isfinite(cls.storage_charging_efficiency)):
+ if (~np.isfinite(cls.storage_charging_efficiency)).any():
raise BackendError("np.any(~np.isfinite(self.storage_charging_efficiency))")
- if np.any(~np.isfinite(cls.storage_discharging_efficiency)):
+ if (~np.isfinite(cls.storage_discharging_efficiency)).any():
raise BackendError(
"np.any(~np.isfinite(self.storage_discharging_efficiency))"
)
- if np.any(cls.storage_Emax < cls.storage_Emin):
+ if (cls.storage_Emax < cls.storage_Emin).any():
tmp = np.where(cls.storage_Emax < cls.storage_Emin)[0]
raise BackendError(
f"storage_Emax < storage_Emin for storage units with ids: {tmp}"
)
- if np.any(cls.storage_Emax < 0.0):
+ if (cls.storage_Emax < 0.0).any():
tmp = np.where(cls.storage_Emax < 0.0)[0]
raise BackendError(
f"self.storage_Emax < 0. for storage units with ids: {tmp}"
)
- if np.any(cls.storage_Emin < 0.0):
+ if (cls.storage_Emin < 0.0).any():
tmp = np.where(cls.storage_Emin < 0.0)[0]
raise BackendError(
f"self.storage_Emin < 0. for storage units with ids: {tmp}"
)
- if np.any(cls.storage_max_p_prod < 0.0):
+ if (cls.storage_max_p_prod < 0.0).any():
tmp = np.where(cls.storage_max_p_prod < 0.0)[0]
raise BackendError(
f"self.storage_max_p_prod < 0. for storage units with ids: {tmp}"
)
- if np.any(cls.storage_max_p_absorb < 0.0):
+ if (cls.storage_max_p_absorb < 0.0).any():
tmp = np.where(cls.storage_max_p_absorb < 0.0)[0]
raise BackendError(
f"self.storage_max_p_absorb < 0. for storage units with ids: {tmp}"
)
- if np.any(cls.storage_loss < 0.0):
+ if (cls.storage_loss < 0.0).any():
tmp = np.where(cls.storage_loss < 0.0)[0]
raise BackendError(
f"self.storage_loss < 0. for storage units with ids: {tmp}"
)
- if np.any(cls.storage_discharging_efficiency <= 0.0):
+ if (cls.storage_discharging_efficiency <= 0.0).any():
tmp = np.where(cls.storage_discharging_efficiency <= 0.0)[0]
raise BackendError(
f"self.storage_discharging_efficiency <= 0. for storage units with ids: {tmp}"
)
- if np.any(cls.storage_discharging_efficiency > 1.0):
+ if (cls.storage_discharging_efficiency > 1.0).any():
tmp = np.where(cls.storage_discharging_efficiency > 1.0)[0]
raise BackendError(
f"self.storage_discharging_efficiency > 1. for storage units with ids: {tmp}"
)
- if np.any(cls.storage_charging_efficiency < 0.0):
+ if (cls.storage_charging_efficiency < 0.0).any():
tmp = np.where(cls.storage_charging_efficiency < 0.0)[0]
raise BackendError(
f"self.storage_charging_efficiency < 0. for storage units with ids: {tmp}"
)
- if np.any(cls.storage_charging_efficiency > 1.0):
+ if (cls.storage_charging_efficiency > 1.0).any():
tmp = np.where(cls.storage_charging_efficiency > 1.0)[0]
raise BackendError(
f"self.storage_charging_efficiency > 1. for storage units with ids: {tmp}"
)
- if np.any(cls.storage_loss > cls.storage_max_p_absorb):
+ if (cls.storage_loss > cls.storage_max_p_absorb).any():
tmp = np.where(cls.storage_loss > cls.storage_max_p_absorb)[0]
raise BackendError(
f"Some storage units are such that their loss (self.storage_loss) is higher "
@@ -2501,11 +2523,11 @@ def _check_validity_dispathcing_data(cls):
"(gen_renewable) when redispatching is supposed to be available."
)
- if np.any(cls.gen_min_uptime < 0):
+ if (cls.gen_min_uptime < 0).any():
raise InvalidRedispatching(
"Minimum uptime of generator (gen_min_uptime) cannot be negative"
)
- if np.any(cls.gen_min_downtime < 0):
+ if (cls.gen_min_downtime < 0).any():
raise InvalidRedispatching(
"Minimum downtime of generator (gen_min_downtime) cannot be negative"
)
@@ -2514,23 +2536,23 @@ def _check_validity_dispathcing_data(cls):
if not el in ["solar", "wind", "hydro", "thermal", "nuclear"]:
raise InvalidRedispatching("Unknown generator type : {}".format(el))
- if np.any(cls.gen_pmin < 0.0):
+ if (cls.gen_pmin < 0.0).any():
raise InvalidRedispatching("One of the Pmin (gen_pmin) is negative")
- if np.any(cls.gen_pmax < 0.0):
+ if (cls.gen_pmax < 0.0).any():
raise InvalidRedispatching("One of the Pmax (gen_pmax) is negative")
- if np.any(cls.gen_max_ramp_down < 0.0):
+ if (cls.gen_max_ramp_down < 0.0).any():
raise InvalidRedispatching(
"One of the ramp up (gen_max_ramp_down) is negative"
)
- if np.any(cls.gen_max_ramp_up < 0.0):
+ if (cls.gen_max_ramp_up < 0.0).any():
raise InvalidRedispatching(
"One of the ramp down (gen_max_ramp_up) is negative"
)
- if np.any(cls.gen_startup_cost < 0.0):
+ if (cls.gen_startup_cost < 0.0).any():
raise InvalidRedispatching(
"One of the start up cost (gen_startup_cost) is negative"
)
- if np.any(cls.gen_shutdown_cost < 0.0):
+ if (cls.gen_shutdown_cost < 0.0).any():
raise InvalidRedispatching(
"One of the start up cost (gen_shutdown_cost) is negative"
)
@@ -2581,10 +2603,10 @@ def _check_validity_dispathcing_data(cls):
"{} should be convertible data should be convertible to "
'{} with error: \n"{}"'.format(el, type_, exc_)
)
- if np.any(
+ if (
cls.gen_max_ramp_up[cls.gen_redispatchable]
> cls.gen_pmax[cls.gen_redispatchable]
- ):
+ ).any():
raise InvalidRedispatching(
"Invalid maximum ramp for some generator (above pmax)"
)
@@ -2706,9 +2728,12 @@ def process_grid2op_compat(cls):
# this feature did not exist before.
cls.dim_alarms = 0
cls.assistant_warning_type = None
+
if cls.glop_version < "1.9.1":
# this feature did not exists before
cls.dim_alerts = 0
+ cls.alertable_line_names = []
+ cls.alertable_line_ids = []
@classmethod
def get_obj_connect_to(cls, _sentinel=None, substation_id=None):
@@ -3579,7 +3604,7 @@ class res(GridObjects):
cls.n_load = len(cls.name_load)
cls.n_line = len(cls.name_line)
cls.n_sub = len(cls.name_sub)
- cls.dim_topo = np.sum(cls.sub_info)
+ cls.dim_topo = cls.sub_info.sum()
if dict_["gen_type"] is None:
cls.redispatching_unit_commitment_availble = False
@@ -3705,12 +3730,17 @@ class res(GridObjects):
if "dim_alerts" in dict_:
# NB by default the constructor do as if there were no alert so that's great !
cls.dim_alerts = dict_["dim_alerts"]
- cls.alertable_line_names = extract_from_dict(
- dict_, "alertable_line_names", lambda x: np.array(x).astype(str)
- )
- cls.alertable_line_ids = extract_from_dict(
- dict_, "alertable_line_ids", lambda x: np.array(x).astype(dt_int)
- )
+ if cls.dim_alerts > 0:
+ cls.alertable_line_names = extract_from_dict(
+ dict_, "alertable_line_names", lambda x: np.array(x).astype(str)
+ )
+ cls.alertable_line_ids = extract_from_dict(
+ dict_, "alertable_line_ids", lambda x: np.array(x).astype(dt_int)
+ )
+ else:
+ cls.alertable_line_names = []
+ cls.alertable_line_ids = []
+
# retrieve the redundant information that are not stored (for efficiency)
obj_ = cls()
obj_._compute_pos_big_topo_cls()
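A minimal sketch (editor's illustration, not part of the patch) of the position computation in `_aux_pos_big_topo` that the `cls.sub_info[:sub_id].sum()` change above touches; the grid layout is hypothetical::

    import numpy as np

    sub_info = np.array([3, 4, 2])         # number of elements per substation
    vect_to_subid = np.array([0, 1, 2])    # e.g. three loads, one per substation
    vect_to_sub_pos = np.array([2, 0, 1])  # their positions inside their substation

    res = np.zeros(3, dtype=int)
    for i, (sub_id, my_pos) in enumerate(zip(vect_to_subid, vect_to_sub_pos)):
        res[i] = sub_info[:sub_id].sum() + my_pos  # elements before the substation + local position
    print(res)  # [2 3 8]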
diff --git a/grid2op/__init__.py b/grid2op/__init__.py
index 48ff21c31..867a9964c 100644
--- a/grid2op/__init__.py
+++ b/grid2op/__init__.py
@@ -11,7 +11,7 @@
Grid2Op
"""
-__version__ = '1.9.1'
+__version__ = '1.9.2.dev0'
__all__ = [
"Action",
diff --git a/grid2op/data/l2rpn_idf_2023/config.py b/grid2op/data/l2rpn_idf_2023/config.py
index 5af280dba..733829b90 100644
--- a/grid2op/data/l2rpn_idf_2023/config.py
+++ b/grid2op/data/l2rpn_idf_2023/config.py
@@ -71,13 +71,13 @@
this_rules = RulesByArea([[0, 1, 2, 3, 10, 11, 116, 13, 12, 14, 4, 5, 6, 15, 7, 8, 9,
23, 27, 28, 26, 30, 114, 113, 31, 112, 16, 29, 25, 24, 17,
- 18, 19, 20, 21, 22, 24, 71, 70, 72],
+ 18, 19, 20, 21, 22, 71, 70, 72],
[32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46,
47, 48, 64, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60,
61, 62, 63, 66, 65],
[69, 73, 74, 117, 75, 76, 77, 78, 79, 80, 98, 97, 96, 95, 94,
- 93, 99, 98, 105, 103, 104, 106, 107, 108, 111, 109, 110, 102,
- 100, 92, 91, 101, 100, 90, 89, 88, 87, 84, 83, 82, 81, 85, 86,
+ 93, 99, 105, 103, 104, 106, 107, 108, 111, 109, 110, 102,
+ 100, 92, 91, 101, 90, 89, 88, 87, 84, 83, 82, 81, 85, 86,
68, 67, 115]
])
diff --git a/grid2op/data_test/l2rpn_idf_2023_with_alert/config.py b/grid2op/data_test/l2rpn_idf_2023_with_alert/config.py
index 630dd98c2..96dfe9c41 100644
--- a/grid2op/data_test/l2rpn_idf_2023_with_alert/config.py
+++ b/grid2op/data_test/l2rpn_idf_2023_with_alert/config.py
@@ -9,6 +9,8 @@
try:
from grid2op.l2rpn_utils import ActionIDF2023, ObservationIDF2023
except ImportError:
+ from grid2op.Observation import CompleteObservation
+ import warnings
warnings.warn("The grid2op version you are trying to use is too old for this environment. Please upgrade it.")
ActionIDF2023 = PlayableAction
ObservationIDF2023 = CompleteObservation
diff --git a/grid2op/gym_compat/__init__.py b/grid2op/gym_compat/__init__.py
index d6952ae6a..0672745d7 100644
--- a/grid2op/gym_compat/__init__.py
+++ b/grid2op/gym_compat/__init__.py
@@ -22,6 +22,9 @@
from grid2op.gym_compat.utils import _MAX_GYM_VERSION_RANDINT, GYM_VERSION, GYMNASIUM_AVAILABLE, GYM_AVAILABLE
+if GYM_AVAILABLE is False and GYMNASIUM_AVAILABLE is False:
+ raise ImportError("Neither gymnasium nor gym are installed. The `grid2op.gym_compat` module cannot be used.")
+
# base for all gym converter
from grid2op.gym_compat.base_gym_attr_converter import BaseGymAttrConverter
if GYMNASIUM_AVAILABLE:
diff --git a/grid2op/gym_compat/box_gym_actspace.py b/grid2op/gym_compat/box_gym_actspace.py
index 142f484dd..1838a4f33 100644
--- a/grid2op/gym_compat/box_gym_actspace.py
+++ b/grid2op/gym_compat/box_gym_actspace.py
@@ -10,7 +10,6 @@
import copy
import warnings
import numpy as np
-# from gym.spaces import Box
from grid2op.Action import BaseAction, ActionSpace
from grid2op.dtypes import dt_int, dt_bool, dt_float
@@ -231,8 +230,8 @@ def __init__(
low_gen = -1.0 * act_sp.gen_max_ramp_down[act_sp.gen_redispatchable]
high_gen = 1.0 * act_sp.gen_max_ramp_up[act_sp.gen_redispatchable]
- nb_redisp = np.sum(act_sp.gen_redispatchable)
- nb_curtail = np.sum(act_sp.gen_renewable)
+ nb_redisp = act_sp.gen_redispatchable.sum()
+ nb_curtail = act_sp.gen_renewable.sum()
curtail = np.full(shape=(nb_curtail,), fill_value=0.0, dtype=dt_float)
curtail_mw = np.full(shape=(nb_curtail,), fill_value=0.0, dtype=dt_float)
self._dict_properties = {
@@ -654,7 +653,7 @@ def normalize_attr(self, attr_nm: str):
both_finite = finite_high & finite_low
both_finite &= curr_high > curr_low
- if np.any(~both_finite):
+ if (~both_finite).any():
warnings.warn(f"The normalization of attribute \"{both_finite}\" cannot be performed entirely as "
f"there are some non finite value, or `high == `low` "
f"for some components.")
diff --git a/grid2op/gym_compat/box_gym_obsspace.py b/grid2op/gym_compat/box_gym_obsspace.py
index 8b2b28d6d..5a5a778a7 100644
--- a/grid2op/gym_compat/box_gym_obsspace.py
+++ b/grid2op/gym_compat/box_gym_obsspace.py
@@ -632,10 +632,7 @@ def __init__(
# initialize the base container
type(self)._BoxType.__init__(self, low=low, high=high, shape=shape, dtype=dtype)
- # convert data in `_add` and `_multiply` to the right type
-
- # self._subtract = {k: v.astype(self.dtype) for k, v in self._subtract.items()}
- # self._divide = {k: v.astype(self.dtype) for k, v in self._divide.items()}
+ # convert data in `_subtract` and `_divide` to the right type
self._fix_value_sub_div(self._subtract, functs)
self._fix_value_sub_div(self._divide, functs)
@@ -713,7 +710,7 @@ def _get_info(self, functs):
elif isinstance(high_, float):
high_ = np.full(shape_, fill_value=high_, dtype=dtype_)
- if np.any((tmp < low_) | (tmp > high_)):
+ if ((tmp < low_) | (tmp > high_)).any():
raise RuntimeError(
f"Wrong value for low / high in the functs argument for key {el}. Please"
f"fix the low_ / high_ in the tuple ( callable_, low_, high_, shape_, dtype_)."
@@ -861,7 +858,7 @@ def normalize_attr(self, attr_nm: str):
both_finite = finite_high & finite_low
both_finite &= curr_high > curr_low
- if np.any(~both_finite):
+ if (~both_finite).any():
warnings.warn(f"The normalization of attribute \"{both_finite}\" cannot be performed entirely as "
f"there are some non finite value, or `high == `low` "
f"for some components.")
diff --git a/grid2op/gym_compat/continuous_to_discrete.py b/grid2op/gym_compat/continuous_to_discrete.py
index 653346edd..f27ba60cf 100644
--- a/grid2op/gym_compat/continuous_to_discrete.py
+++ b/grid2op/gym_compat/continuous_to_discrete.py
@@ -11,9 +11,6 @@
from grid2op.dtypes import dt_int
from grid2op.gym_compat.utils import GYM_AVAILABLE, GYMNASIUM_AVAILABLE
-# from gym.spaces import Box, MultiDiscrete
-# from grid2op.gym_compat.base_gym_attr_converter import BaseGymAttrConverter
-
class __AuxContinuousToDiscreteConverter:
"""
@@ -134,7 +131,7 @@ def gym_to_g2op(self, gym_object):
def g2op_to_gym(self, g2op_object):
mask = self._bins_size >= g2op_object
mask = 1 - mask
- res = np.sum(mask, axis=0)
+ res = mask.sum(axis=0)
res[self._ignored] = 0
return res
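A minimal sketch (editor's illustration, not part of the patch) of the mask trick used by `g2op_to_gym` above to find the discrete bin of a continuous value; the bin edges are hypothetical and the `_ignored` handling is omitted::

    import numpy as np

    bins_size = np.array([[-5.0], [0.0], [5.0]])  # bin edges, shape (n_bins, dim)
    g2op_object = np.array([2.0])

    mask = bins_size >= g2op_object  # [[False], [False], [True]]
    res = (1 - mask).sum(axis=0)     # 2 edges lie below the value -> bin index 2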
diff --git a/grid2op/gym_compat/multidiscrete_gym_actspace.py b/grid2op/gym_compat/multidiscrete_gym_actspace.py
index 0cf3545e6..a92620389 100644
--- a/grid2op/gym_compat/multidiscrete_gym_actspace.py
+++ b/grid2op/gym_compat/multidiscrete_gym_actspace.py
@@ -272,8 +272,8 @@ def __init__(self, grid2op_action_space, attr_to_keep=ALL_ATTR, nb_bins=None):
f"how to convert it to a discrete space. See the documentation "
f"for more information."
)
- nb_redispatch = np.sum(act_sp.gen_redispatchable)
- nb_renew = np.sum(act_sp.gen_renewable)
+ nb_redispatch = act_sp.gen_redispatchable.sum()
+ nb_renew = act_sp.gen_renewable.sum()
if el == "redispatch":
self.dict_properties[el] = (
[nb_bins[el] for _ in range(nb_redispatch)],
diff --git a/grid2op/gym_compat/utils.py b/grid2op/gym_compat/utils.py
index 0a35ebb5d..2e42adac1 100644
--- a/grid2op/gym_compat/utils.py
+++ b/grid2op/gym_compat/utils.py
@@ -48,7 +48,7 @@
)
-# raise alert or alarm is not supported
+# raise alert or alarm is not supported by ALL_ATTR_FOR_DISCRETE nor ATTR_DISCRETE
ALL_ATTR_FOR_DISCRETE = (
"set_line_status",
"change_line_status",
@@ -69,8 +69,6 @@
"sub_change_bus",
"one_sub_set",
"one_sub_change",
- # "raise_alarm"
- # "raise_alert"
)
ALL_ATTR_CONT = (
@@ -99,7 +97,7 @@ def _compute_extra_power_for_losses(gridobj):
"""
import numpy as np
- return 0.3 * np.sum(np.abs(gridobj.gen_pmax))
+ return 0.3 * np.abs(gridobj.gen_pmax).sum()
def sample_seed(max_, np_random):
diff --git a/grid2op/simulator/simulator.py b/grid2op/simulator/simulator.py
index aecca8d7b..66b203e06 100644
--- a/grid2op/simulator/simulator.py
+++ b/grid2op/simulator/simulator.py
@@ -291,7 +291,7 @@ def _update_obs(self):
def _adjust_controlable_gen(
self, new_gen_p: np.ndarray, target_dispatch: np.ndarray, sum_target: float
) -> Optional[float]:
- nb_dispatchable = np.sum(self.current_obs.gen_redispatchable)
+ nb_dispatchable = self.current_obs.gen_redispatchable.sum()
# which generators needs to be "optimized" -> the one where
# the target function matter
@@ -319,7 +319,7 @@ def _adjust_controlable_gen(
weights = np.ones(nb_dispatchable) * coeffs[self.current_obs.gen_redispatchable]
weights /= weights.sum()
- scale_objective = max(0.5 * np.sum(np.abs(target_dispatch_redisp)) ** 2, 1.0)
+ scale_objective = max(0.5 * np.abs(target_dispatch_redisp).sum() ** 2, 1.0)
scale_objective = np.round(scale_objective, decimals=4)
tmp_zeros = np.zeros((1, nb_dispatchable), dtype=float)
@@ -338,7 +338,7 @@ def target(actual_dispatchable):
coeffs_quads = weights[gen_in_target] * quad_
coeffs_quads_const = coeffs_quads.sum()
coeffs_quads_const /= scale_objective # scaling the function
- coeffs_quads_const += 1e-2 * np.sum(actual_dispatchable**2 * weights)
+ coeffs_quads_const += 1e-2 * (actual_dispatchable**2 * weights).sum()
return coeffs_quads_const
def jac(actual_dispatchable):
@@ -383,9 +383,9 @@ def f(init):
# desired solution (split the (sum of the) dispatch to the available generators)
x0 = 1.0 * target_dispatch_redisp
can_adjust = x0 == 0.0
- if np.any(can_adjust):
- init_sum = np.sum(x0)
- denom_adjust = np.sum(1.0 / weights[can_adjust])
+ if (can_adjust).any():
+ init_sum = x0.sum()
+ denom_adjust = (1.0 / weights[can_adjust]).sum()
if denom_adjust <= 1e-2:
# i don't want to divide by something too cloose to 0.
denom_adjust = 1.0
@@ -405,14 +405,14 @@ def _amount_curtailed(
limit_curtail = curt_vect * act.gen_pmax
curtailed = np.maximum(new_gen_p - limit_curtail, 0.0)
curtailed[~act.gen_renewable] = 0.0
- amount_curtail = np.sum(curtailed)
+ amount_curtail = curtailed.sum()
new_gen_p_after_curtail = 1.0 * new_gen_p
new_gen_p_after_curtail -= curtailed
return new_gen_p_after_curtail, amount_curtail
def _amount_storage(self, act: BaseAction) -> Tuple[float, np.ndarray]:
storage_act = 1.0 * act.storage_p
- res = np.sum(self.current_obs.storage_power_target)
+ res = self.current_obs.storage_power_target.sum()
current_charge = 1.0 * self.current_obs.storage_charge
storage_power = np.zeros(act.n_storage)
if np.all(np.abs(storage_act) <= self._tol_redisp):
@@ -438,7 +438,7 @@ def _amount_storage(self, act: BaseAction) -> Tuple[float, np.ndarray]:
storage_power = storage_act_E / coeff_p_to_E
storage_power[do_charge] *= act.storage_charging_efficiency[do_charge]
storage_power[do_discharge] /= act.storage_discharging_efficiency[do_discharge]
- res += np.sum(storage_power)
+ res += storage_power.sum()
return -res, storage_power, current_charge
def _fix_redisp_curtailment_storage(
@@ -466,7 +466,7 @@ def _fix_redisp_curtailment_storage(
new_vect_redisp
]
- if abs(np.sum(target_dispatch) - sum_target) >= self._tol_redisp:
+ if abs(target_dispatch.sum() - sum_target) >= self._tol_redisp:
adjust = self._adjust_controlable_gen(
new_gen_p_after_curtail, target_dispatch, sum_target
)
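A minimal sketch (editor's illustration, not part of the patch) of the curtailment computation in `_amount_curtailed` above; the generator data is hypothetical::

    import numpy as np

    gen_pmax = np.array([50.0, 100.0])
    gen_renewable = np.array([True, False])
    curt_vect = np.array([0.4, 1.0])  # curtailment limit, as a fraction of pmax
    new_gen_p = np.array([30.0, 80.0])

    limit_curtail = curt_vect * gen_pmax                    # [20., 100.]
    curtailed = np.maximum(new_gen_p - limit_curtail, 0.0)  # [10., 0.]
    curtailed[~gen_renewable] = 0.0                         # only renewables can be curtailed
    amount_curtail = curtailed.sum()                        # 10.0 MW shaved off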
diff --git a/grid2op/tests/BaseBackendTest.py b/grid2op/tests/BaseBackendTest.py
index 3ea0ddd4b..01e849e20 100644
--- a/grid2op/tests/BaseBackendTest.py
+++ b/grid2op/tests/BaseBackendTest.py
@@ -57,7 +57,7 @@ def comb(n, k):
from grid2op.Rules import RulesChecker
from grid2op.MakeEnv import make
from grid2op.Rules import AlwaysLegal
-from grid2op.Action._BackendAction import _BackendAction
+from grid2op.Action._backendAction import _BackendAction
import pdb
diff --git a/grid2op/tests/_aux_opponent_for_test_alerts.py b/grid2op/tests/_aux_opponent_for_test_alerts.py
new file mode 100644
index 000000000..13d8b50db
--- /dev/null
+++ b/grid2op/tests/_aux_opponent_for_test_alerts.py
@@ -0,0 +1,127 @@
+# Copyright (c) 2023, RTE (https://www.rte-france.com)
+# See AUTHORS.txt
+# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
+# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
+# you can obtain one at http://mozilla.org/MPL/2.0/.
+# SPDX-License-Identifier: MPL-2.0
+# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
+
+import numpy as np
+import copy
+from grid2op.Opponent import BaseOpponent
+
+
+def _get_steps_attack(kwargs_opponent, multi=False):
+ """computes the steps for which there will be attacks"""
+ ts_attack = np.array(kwargs_opponent["steps_attack"])
+ res = []
+ for i, ts in enumerate(ts_attack):
+ if not multi:
+ res.append(ts + np.arange(kwargs_opponent["duration"]))
+ else:
+ res.append(ts + np.arange(kwargs_opponent["duration"][i]))
+ return np.unique(np.concatenate(res).flatten())
+
+def _get_blackout(action_space):
+ blackout_action = action_space({})
+ blackout_action.gen_set_bus = [(0, -1)]
+ return blackout_action
+
+
+class OpponentForTestAlert(BaseOpponent):
+ """An opponent that can select the line attack, the time and duration of the attack."""
+
+ def __init__(self, action_space):
+ super().__init__(action_space)
+ self.env = None
+ self.lines_attacked = None
+ self.custom_attack = None
+ self.attack_duration = None
+ self.attack_steps = None
+ self.attack_id = None
+
+ def _custom_deepcopy_for_copy(self, new_obj, dict_=None):
+ new_obj.env = dict_["partial_env"]
+ new_obj.lines_attacked = copy.deepcopy(self.lines_attacked)
+ new_obj.custom_attack = [act.copy() for act in self.custom_attack]
+ new_obj.attack_duration = copy.deepcopy(self.attack_duration)
+ new_obj.attack_steps = copy.deepcopy(self.attack_steps)
+ new_obj.attack_id = copy.deepcopy(self.attack_id)
+ return super()._custom_deepcopy_for_copy(new_obj, dict_)
+
+ def init(self,
+ partial_env,
+ lines_attacked,
+ attack_duration=[],
+ attack_steps=[],
+ attack_id=[]):
+ self.lines_attacked = lines_attacked
+        self.custom_attack = [self.action_space({"set_line_status": [(l, -1)]}) for l in attack_id]
+ self.attack_duration = attack_duration
+ self.attack_steps = attack_steps
+ self.attack_id = attack_id
+ self.env = partial_env
+
+ def attack(self, observation, agent_action, env_action, budget, previous_fails):
+ if observation is None:
+ return None, None
+ current_step = self.env.nb_time_step
+ if current_step not in self.attack_steps:
+ return None, None
+ index = self.attack_steps.index(current_step)
+ return self.custom_attack[index], self.attack_duration[index]
+
+
+class TestOpponent(BaseOpponent):
+ """An opponent that can select the line attack, the time and duration of the attack."""
+
+ def __init__(self, action_space):
+ super().__init__(action_space)
+ self.custom_attack = None
+ self.duration = None
+ self.steps_attack = None
+
+ def init(self, partial_env, lines_attacked, duration=10, steps_attack=[0,1]):
+ attacked_line = lines_attacked[0]
+        self.custom_attack = self.action_space({"set_line_status": [(l, -1) for l in lines_attacked]})
+ self.duration = duration
+ self.steps_attack = steps_attack
+ self.env = partial_env
+
+ def attack(self, observation, agent_action, env_action, budget, previous_fails):
+ if observation is None:
+ return None, None
+ current_step = self.env.nb_time_step
+ if current_step not in self.steps_attack:
+ return None, None
+
+ return self.custom_attack, self.duration
+
+
+class TestOpponentMultiLines(BaseOpponent):
+ """An opponent that can select the line attack, the time and duration of the attack."""
+
+ def __init__(self, action_space):
+ super().__init__(action_space)
+ self.custom_attack = None
+ self.duration = None
+ self.steps_attack = None
+
+ def init(self, partial_env, lines_attacked, duration=[10,10], steps_attack=[0,1]):
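+        # one attack event per entry: lines_attacked[i] is attacked at step steps_attack[i] for duration[i] steps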
+ attacked_line = lines_attacked[0]
+        self.custom_attack = [self.action_space({"set_line_status": [(l, -1)]}) for l in lines_attacked]
+ self.duration = duration
+ self.steps_attack = steps_attack
+ self.env = partial_env
+
+ def attack(self, observation, agent_action, env_action, budget, previous_fails):
+ if observation is None:
+ return None, None
+
+ current_step = self.env.nb_time_step
+ if current_step not in self.steps_attack:
+ return None, None
+
+ index = self.steps_attack.index(current_step)
+
+ return self.custom_attack[index], self.duration[index]
diff --git a/grid2op/tests/test_alert_score.py b/grid2op/tests/test_AlertReward.py
similarity index 92%
rename from grid2op/tests/test_alert_score.py
rename to grid2op/tests/test_AlertReward.py
index 0717bef38..f95f3a568 100644
--- a/grid2op/tests/test_alert_score.py
+++ b/grid2op/tests/test_AlertReward.py
@@ -19,11 +19,13 @@
from grid2op.Parameters import Parameters
from grid2op.Exceptions import Grid2OpException
from grid2op.Runner import Runner # TODO
-from grid2op.Opponent import BaseOpponent, GeometricOpponent
from grid2op.Action import BaseAction, PlayableAction
from grid2op.Agent import BaseAgent
from grid2op.Episode import EpisodeData
+from _aux_opponent_for_test_alerts import (_get_steps_attack,
+ TestOpponent,
+ TestOpponentMultiLines)
ALL_ATTACKABLE_LINES= [
"62_58_180",
@@ -40,72 +42,11 @@
ATTACKED_LINE = "48_50_136"
-
-def _get_steps_attack(kwargs_opponent, multi=False):
- """computes the steps for which there will be attacks"""
- ts_attack = np.array(kwargs_opponent["steps_attack"])
- res = []
- for i, ts in enumerate(ts_attack):
- if not multi:
- res.append(ts + np.arange(kwargs_opponent["duration"]))
- else:
- res.append(ts + np.arange(kwargs_opponent["duration"][i]))
- return np.unique(np.concatenate(res).flatten())
-
-
-class TestOpponent(BaseOpponent):
- """An opponent that can select the line attack, the time and duration of the attack."""
-
- def __init__(self, action_space):
- super().__init__(action_space)
- self.custom_attack = None
- self.duration = None
- self.steps_attack = None
-
- def init(self, partial_env, lines_attacked=[ATTACKED_LINE], duration=10, steps_attack=[0,1]):
- attacked_line = lines_attacked[0]
- self.custom_attack = self.action_space({"set_line_status" : [(l, -1) for l in lines_attacked]})
- self.duration = duration
- self.steps_attack = steps_attack
- self.env = partial_env
-
- def attack(self, observation, agent_action, env_action, budget, previous_fails):
- if observation is None:
- return None, None
- current_step = self.env.nb_time_step
- if current_step not in self.steps_attack:
- return None, None
-
- return self.custom_attack, self.duration
-
-
-class TestOpponentMultiLines(BaseOpponent):
- """An opponent that can select the line attack, the time and duration of the attack."""
-
- def __init__(self, action_space):
- super().__init__(action_space)
- self.custom_attack = None
- self.duration = None
- self.steps_attack = None
-
- def init(self, partial_env, lines_attacked=[ATTACKED_LINE], duration=[10,10], steps_attack=[0,1]):
- attacked_line = lines_attacked[0]
- self.custom_attack = [ self.action_space({"set_line_status" : [(l, -1)]}) for l in lines_attacked]
- self.duration = duration
- self.steps_attack = steps_attack
- self.env = partial_env
-
- def attack(self, observation, agent_action, env_action, budget, previous_fails):
- if observation is None:
- return None, None
-
- current_step = self.env.nb_time_step
- if current_step not in self.steps_attack:
- return None, None
-
- index = self.steps_attack.index(current_step)
-
- return self.custom_attack[index], self.duration[index]
+DEFAULT_ALERT_REWARD_PARAMS = dict(reward_min_no_blackout=-1.0,
+ reward_min_blackout=-10.0,
+ reward_max_no_blackout=1.0,
+ reward_max_blackout=2.0,
+ reward_end_episode_bonus=42.0)
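+# non-default values chosen so that each outcome (correct alert, missed alert, false alert,
+# end-of-episode bonus) yields a reward that the assertions below can identify unambiguously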
# Test alert blackout / test alert no blackout
@@ -128,7 +69,7 @@ def test_assistant_reward_value_no_blackout_no_attack_no_alert(self) -> None :
self.env_nm,
test=True,
difficulty="1",
- reward_class=AlertReward(reward_end_episode_bonus=42)
+ reward_class=AlertReward(**DEFAULT_ALERT_REWARD_PARAMS)
) as env:
env.seed(0)
env.reset()
@@ -157,7 +98,7 @@ def test_assistant_reward_value_no_blackout_no_attack_alert(self) -> None :
self.env_nm,
test=True,
difficulty="1",
- reward_class=AlertReward(reward_end_episode_bonus=42)
+ reward_class=AlertReward(**DEFAULT_ALERT_REWARD_PARAMS)
) as env:
env.seed(0)
env.reset()
@@ -203,7 +144,7 @@ def test_assistant_reward_value_no_blackout_attack_no_alert(self) -> None :
opponent_action_class=PlayableAction,
opponent_class=TestOpponent,
kwargs_opponent=kwargs_opponent,
- reward_class=AlertReward(reward_end_episode_bonus=42),
+ reward_class=AlertReward(**DEFAULT_ALERT_REWARD_PARAMS),
_add_to_name="_tarvnbana"
) as env :
env.seed(0)
@@ -242,7 +183,7 @@ def test_assistant_reward_value_no_blackout_attack_alert(self) -> None :
opponent_action_class=PlayableAction,
opponent_class=TestOpponent,
kwargs_opponent=kwargs_opponent,
- reward_class=AlertReward(reward_end_episode_bonus=42),
+ reward_class=AlertReward(**DEFAULT_ALERT_REWARD_PARAMS),
_add_to_name="_tarvnba"
) as env :
env.seed(0)
@@ -288,6 +229,7 @@ def test_assistant_reward_value_no_blackout_attack_alert_too_late(self) -> None
opponent_action_class=PlayableAction,
opponent_class=TestOpponent,
kwargs_opponent=kwargs_opponent,
+ reward_class=AlertReward(**DEFAULT_ALERT_REWARD_PARAMS),
_add_to_name="_tarvnbaatl"
) as env :
env.seed(0)
@@ -308,7 +250,7 @@ def test_assistant_reward_value_no_blackout_attack_alert_too_late(self) -> None
if step == 4 :
assert reward == 1
elif step == env.max_episode_duration():
- assert reward == 1
+ assert reward == 42
else :
assert reward == 0
@@ -321,8 +263,8 @@ def test_assistant_reward_value_no_blackout_attack_alert_too_early(self)-> None
"""
kwargs_opponent = dict(lines_attacked=[ATTACKED_LINE],
- duration=3,
- steps_attack=[2])
+ duration=3,
+ steps_attack=[2])
with make(self.env_nm,
test=True,
difficulty="1",
@@ -333,6 +275,7 @@ def test_assistant_reward_value_no_blackout_attack_alert_too_early(self)-> None
opponent_action_class=PlayableAction,
opponent_class=TestOpponent,
kwargs_opponent=kwargs_opponent,
+ reward_class=AlertReward(**DEFAULT_ALERT_REWARD_PARAMS),
_add_to_name="_tarvnbaate"
) as env :
env.seed(0)
@@ -354,7 +297,7 @@ def test_assistant_reward_value_no_blackout_attack_alert_too_early(self)-> None
if step == 4:
assert reward == 1
elif step == env.max_episode_duration():
- assert reward == 1
+ assert reward == 42
else :
assert reward == 0
@@ -368,8 +311,8 @@ def test_assistant_reward_value_no_blackout_2_attack_same_time_no_alert(self) ->
"""
kwargs_opponent = dict(lines_attacked=[ATTACKED_LINE]+['48_53_141'],
- duration=3,
- steps_attack=[1])
+ duration=3,
+ steps_attack=[1])
with make(self.env_nm,
test=True,
difficulty="1",
@@ -380,7 +323,7 @@ def test_assistant_reward_value_no_blackout_2_attack_same_time_no_alert(self) ->
opponent_action_class=PlayableAction,
opponent_class=TestOpponent,
kwargs_opponent=kwargs_opponent,
- reward_class=AlertReward(reward_end_episode_bonus=42),
+ reward_class=AlertReward(**DEFAULT_ALERT_REWARD_PARAMS),
_add_to_name="_tarvnb2astna"
) as env :
env.seed(0)
@@ -422,6 +365,7 @@ def test_assistant_reward_value_no_blackout_2_attack_same_time_1_alert(self) ->
opponent_action_class=PlayableAction,
opponent_class=TestOpponent,
kwargs_opponent=kwargs_opponent,
+ reward_class=AlertReward(**DEFAULT_ALERT_REWARD_PARAMS),
_add_to_name="_tarvnb2ast1a"
) as env :
env.seed(0)
@@ -442,7 +386,7 @@ def test_assistant_reward_value_no_blackout_2_attack_same_time_1_alert(self) ->
if step == 4 :
assert reward == 0
elif step == env.max_episode_duration():
- assert reward == 1
+ assert reward == 42
else :
assert reward == 0
@@ -454,8 +398,8 @@ def test_assistant_reward_value_no_blackout_2_attack_same_time_2_alert(self) ->
until the end of the episode where we have a bonus (here artificially 42)
"""
kwargs_opponent = dict(lines_attacked=[ATTACKED_LINE]+['48_53_141'],
- duration=3,
- steps_attack=[2])
+ duration=3,
+ steps_attack=[2])
with make(self.env_nm,
test=True,
difficulty="1",
@@ -466,6 +410,7 @@ def test_assistant_reward_value_no_blackout_2_attack_same_time_2_alert(self) ->
opponent_action_class=PlayableAction,
opponent_class=TestOpponent,
kwargs_opponent=kwargs_opponent,
+ reward_class=AlertReward(**DEFAULT_ALERT_REWARD_PARAMS),
_add_to_name="_tarvnb2ast2a"
) as env :
env.seed(0)
@@ -486,7 +431,7 @@ def test_assistant_reward_value_no_blackout_2_attack_same_time_2_alert(self) ->
if step == 4 :
assert reward == -1
elif step == env.max_episode_duration():
- assert reward == 1
+ assert reward == 42
else :
assert reward == 0
@@ -512,7 +457,7 @@ def test_assistant_reward_value_no_blackout_2_attack_diff_time_no_alert(self) ->
opponent_action_class=PlayableAction,
opponent_class=TestOpponentMultiLines,
kwargs_opponent=kwargs_opponent,
- reward_class=AlertReward(reward_end_episode_bonus=42),
+ reward_class=AlertReward(**DEFAULT_ALERT_REWARD_PARAMS),
_add_to_name="_tarvnb2dtna"
) as env :
env.seed(0)
@@ -558,7 +503,7 @@ def test_assistant_reward_value_no_blackout_2_attack_diff_time_2_alert(self) ->
opponent_action_class=PlayableAction,
opponent_class=TestOpponentMultiLines,
kwargs_opponent=kwargs_opponent,
- reward_class=AlertReward(reward_end_episode_bonus=42),
+ reward_class=AlertReward(**DEFAULT_ALERT_REWARD_PARAMS),
_add_to_name="_tarvnb2dt2a"
) as env :
env.seed(0)
@@ -595,8 +540,8 @@ def test_assistant_reward_value_no_blackout_2_attack_diff_time_alert_first_attac
"""
kwargs_opponent = dict(lines_attacked=[ATTACKED_LINE]+['48_53_141'],
- duration=[1,1],
- steps_attack=[2, 3])
+ duration=[1,1],
+ steps_attack=[2, 3])
with make(self.env_nm,
test=True,
difficulty="1",
@@ -607,7 +552,7 @@ def test_assistant_reward_value_no_blackout_2_attack_diff_time_alert_first_attac
opponent_action_class=PlayableAction,
opponent_class=TestOpponentMultiLines,
kwargs_opponent=kwargs_opponent,
- reward_class=AlertReward(reward_end_episode_bonus=42),
+ reward_class=AlertReward(**DEFAULT_ALERT_REWARD_PARAMS),
_add_to_name="_tarvnb2dtafa"
) as env :
env.seed(0)
@@ -642,8 +587,8 @@ def test_assistant_reward_value_no_blackout_2_attack_diff_time_alert_second_atta
until the end of the episode where we have a bonus (here artificially 42)
"""
kwargs_opponent = dict(lines_attacked=[ATTACKED_LINE]+['48_53_141'],
- duration=[1,1],
- steps_attack=[2, 3])
+ duration=[1,1],
+ steps_attack=[2, 3])
with make(self.env_nm,
test=True,
difficulty="1",
@@ -654,7 +599,7 @@ def test_assistant_reward_value_no_blackout_2_attack_diff_time_alert_second_atta
opponent_action_class=PlayableAction,
opponent_class=TestOpponentMultiLines,
kwargs_opponent=kwargs_opponent,
- reward_class=AlertReward(reward_end_episode_bonus=42),
+ reward_class=AlertReward(**DEFAULT_ALERT_REWARD_PARAMS),
_add_to_name="_tarvnb2dtasa"
) as env :
env.seed(0)
@@ -686,7 +631,7 @@ def test_raise_illicit_alert(self) -> None:
self.env_nm,
test=True,
difficulty="1",
- reward_class=AlertReward(reward_end_episode_bonus=42)
+ reward_class=AlertReward(**DEFAULT_ALERT_REWARD_PARAMS)
) as env:
env.seed(0)
env.reset()
@@ -738,7 +683,7 @@ def test_assistant_reward_value_blackout_attack_no_alert(self) -> None :
opponent_action_class=PlayableAction,
opponent_class=TestOpponent,
kwargs_opponent=kwargs_opponent,
- reward_class=AlertReward(reward_end_episode_bonus=42),
+ reward_class=AlertReward(**DEFAULT_ALERT_REWARD_PARAMS),
_add_to_name="_tarvbana"
) as env :
new_param = Parameters()
@@ -783,6 +728,7 @@ def test_assistant_reward_value_blackout_attack_raise_good_alert(self) -> None :
opponent_action_class=PlayableAction,
opponent_class=TestOpponent,
kwargs_opponent=kwargs_opponent,
+ reward_class=AlertReward(**DEFAULT_ALERT_REWARD_PARAMS),
_add_to_name="_tarvbarga"
) as env :
new_param = Parameters()
@@ -834,6 +780,7 @@ def test_assistant_reward_value_blackout_attack_raise_alert_just_before_blackout
opponent_action_class=PlayableAction,
opponent_class=TestOpponent,
kwargs_opponent=kwargs_opponent,
+ reward_class=AlertReward(**DEFAULT_ALERT_REWARD_PARAMS),
_add_to_name="_tarvbarajbb"
) as env :
new_param = Parameters()
@@ -873,8 +820,8 @@ def test_assistant_reward_value_blackout_attack_raise_alert_too_early(self) -> N
"""
# return -10
kwargs_opponent = dict(lines_attacked=[ATTACKED_LINE],
- duration=3,
- steps_attack=[3])
+ duration=3,
+ steps_attack=[3])
with make(self.env_nm,
test=True,
difficulty="1",
@@ -885,6 +832,7 @@ def test_assistant_reward_value_blackout_attack_raise_alert_too_early(self) -> N
opponent_action_class=PlayableAction,
opponent_class=TestOpponent,
kwargs_opponent=kwargs_opponent,
+ reward_class=AlertReward(**DEFAULT_ALERT_REWARD_PARAMS),
_add_to_name="_tarvbarate"
) as env :
new_param = Parameters()
@@ -935,6 +883,7 @@ def test_assistant_reward_value_blackout_2_lines_same_step_in_window_good_alert
opponent_action_class=PlayableAction,
opponent_class=TestOpponent,
kwargs_opponent=kwargs_opponent,
+ reward_class=AlertReward(**DEFAULT_ALERT_REWARD_PARAMS),
_add_to_name="_tarvb2lssiwga"
) as env :
new_param = Parameters()
@@ -974,8 +923,8 @@ def test_assistant_reward_value_blackout_2_lines_attacked_simulaneous_only_1_ale
we expect a reward of -4 when the blackout occur at step 4
"""
kwargs_opponent = dict(lines_attacked=[ATTACKED_LINE]+['48_53_141'],
- duration=3,
- steps_attack=[3, 3])
+ duration=3,
+ steps_attack=[3, 3])
with make(self.env_nm,
test=True,
difficulty="1",
@@ -986,6 +935,8 @@ def test_assistant_reward_value_blackout_2_lines_attacked_simulaneous_only_1_ale
opponent_action_class=PlayableAction,
opponent_class=TestOpponent,
kwargs_opponent=kwargs_opponent,
+ reward_class=AlertReward(**DEFAULT_ALERT_REWARD_PARAMS),
_add_to_name="_tarvb2laso1a"
) as env :
new_param = Parameters()
@@ -1038,7 +989,7 @@ def test_assistant_reward_value_blackout_2_lines_different_step_in_window_good_
opponent_action_class=PlayableAction,
opponent_class=TestOpponentMultiLines,
kwargs_opponent=kwargs_opponent,
- reward_class=AlertReward(reward_end_episode_bonus=42),
+ reward_class=AlertReward(**DEFAULT_ALERT_REWARD_PARAMS),
_add_to_name="_tarvb2ldsiwga"
) as env :
env.seed(0)
@@ -1090,7 +1041,7 @@ def test_assistant_reward_value_blackout_2_lines_attacked_different_step_in_wind
opponent_action_class=PlayableAction,
opponent_class=TestOpponentMultiLines,
kwargs_opponent=kwargs_opponent,
- reward_class=AlertReward(reward_end_episode_bonus=42),
+ reward_class=AlertReward(**DEFAULT_ALERT_REWARD_PARAMS),
_add_to_name="_tarvb2ladsiwo1aofal"
) as env :
env.seed(0)
@@ -1138,7 +1089,7 @@ def test_assistant_reward_value_blackout_2_lines_attacked_different_step_in_wind
opponent_action_class=PlayableAction,
opponent_class=TestOpponentMultiLines,
kwargs_opponent=kwargs_opponent,
- reward_class=AlertReward(reward_end_episode_bonus=42),
+ reward_class=AlertReward(**DEFAULT_ALERT_REWARD_PARAMS),
_add_to_name="_tarvb2ladsiwo1aosal"
) as env :
env.seed(0)
@@ -1184,7 +1135,7 @@ def test_assistant_reward_value_blackout_2_lines_attacked_different_1_in_window_
opponent_action_class=PlayableAction,
opponent_class=TestOpponentMultiLines,
kwargs_opponent=kwargs_opponent,
- reward_class=AlertReward(reward_end_episode_bonus=42),
+ reward_class=AlertReward(**DEFAULT_ALERT_REWARD_PARAMS),
_add_to_name="_tarvb2lad1iw1ga"
) as env :
env.seed(0)
@@ -1223,7 +1174,7 @@ def test_assistant_reward_value_blackout_no_attack_alert(self) -> None :
self.env_nm,
test=True,
difficulty="1",
- reward_class=AlertReward(reward_end_episode_bonus=42)
+ reward_class=AlertReward(**DEFAULT_ALERT_REWARD_PARAMS)
) as env:
env.seed(0)
env.reset()
@@ -1254,7 +1205,7 @@ def test_assistant_reward_value_blackout_no_attack_no_alert(self) -> None :
self.env_nm,
test=True,
difficulty="1",
- reward_class=AlertReward(reward_end_episode_bonus=42)
+ reward_class=AlertReward(**DEFAULT_ALERT_REWARD_PARAMS)
) as env:
env.seed(0)
env.reset()
@@ -1283,7 +1234,7 @@ def test_assistant_reward_value_blackout_attack_before_window_alert(self) -> Non
self.env_nm,
test=True,
difficulty="1",
- reward_class=AlertReward(reward_end_episode_bonus=42)
+ reward_class=AlertReward(**DEFAULT_ALERT_REWARD_PARAMS)
) as env:
env.seed(0)
env.reset()
@@ -1314,7 +1265,7 @@ def test_assistant_reward_value_blackout_attack_before_window_no_alert(self) ->
self.env_nm,
test=True,
difficulty="1",
- reward_class=AlertReward(reward_end_episode_bonus=42)
+ reward_class=AlertReward(**DEFAULT_ALERT_REWARD_PARAMS)
) as env:
env.seed(0)
env.reset()
@@ -1346,7 +1297,7 @@ def setUp(self) -> None:
PATH_DATA_TEST, "l2rpn_idf_2023_with_alert"
)
self.env = make(self.env_nm, test=True, difficulty="1",
- reward_class=AlertReward(reward_end_episode_bonus=42))
+ reward_class=AlertReward(**DEFAULT_ALERT_REWARD_PARAMS))
self.env.seed(0)
return super().setUp()
@@ -1387,7 +1338,7 @@ def setUp(self) -> None:
PATH_DATA_TEST, "l2rpn_idf_2023_with_alert"
)
self.env = make(self.env_nm, test=True, difficulty="1",
- reward_class=AlertReward(reward_end_episode_bonus=42))
+ reward_class=AlertReward(**DEFAULT_ALERT_REWARD_PARAMS))
self.env.seed(0)
return super().setUp()
@@ -1448,7 +1399,7 @@ def test_with_opp(self):
opponent_action_class=PlayableAction,
opponent_class=TestOpponent,
kwargs_opponent=kwargs_opponent,
- reward_class=AlertReward(reward_end_episode_bonus=42),
+ reward_class=AlertReward(**DEFAULT_ALERT_REWARD_PARAMS),
_add_to_name = "_test_with_opp")
# without alert
runner = Runner(**env.get_params_for_runner())
@@ -1468,4 +1419,4 @@ def act(self, observation: BaseObservation, reward: float, done: bool = False) -
if __name__ == "__main__":
- unittest.main()
+ unittest.main()
\ No newline at end of file
diff --git a/grid2op/tests/test_BackendConverter.py b/grid2op/tests/test_BackendConverter.py
index 9da79000f..b6811381f 100644
--- a/grid2op/tests/test_BackendConverter.py
+++ b/grid2op/tests/test_BackendConverter.py
@@ -15,18 +15,18 @@
from grid2op.Backend import PandaPowerBackend
from grid2op.tests.helper_path_test import HelperTests
-from grid2op.tests.BaseBackendTest import BaseTestNames
-from grid2op.tests.BaseBackendTest import BaseTestLoadingCase
-from grid2op.tests.BaseBackendTest import BaseTestLoadingBackendFunc
-from grid2op.tests.BaseBackendTest import BaseTestTopoAction
-from grid2op.tests.BaseBackendTest import BaseTestEnvPerformsCorrectCascadingFailures
-from grid2op.tests.BaseBackendTest import BaseTestChangeBusAffectRightBus
-from grid2op.tests.BaseBackendTest import BaseTestShuntAction
-from grid2op.tests.BaseBackendTest import BaseTestResetEqualsLoadGrid
-from grid2op.tests.BaseBackendTest import BaseTestVoltageOWhenDisco
-from grid2op.tests.BaseBackendTest import BaseTestChangeBusSlack
-from grid2op.tests.BaseBackendTest import BaseIssuesTest
-from grid2op.tests.BaseBackendTest import BaseStatusActions
+from grid2op.tests.BaseBackendTest import (BaseTestNames,
+ BaseTestLoadingCase,
+ BaseTestLoadingBackendFunc,
+ BaseTestTopoAction,
+ BaseTestEnvPerformsCorrectCascadingFailures,
+ BaseTestChangeBusAffectRightBus,
+ BaseTestShuntAction,
+ BaseTestResetEqualsLoadGrid,
+ BaseTestVoltageOWhenDisco,
+ BaseTestChangeBusSlack,
+ BaseIssuesTest,
+ BaseStatusActions)
PATH_DATA_TEST_INIT = PATH_DATA_TEST
PATH_DATA_TEST = PATH_DATA_TEST_PP
diff --git a/grid2op/tests/test_ChronicsHandler.py b/grid2op/tests/test_ChronicsHandler.py
index 53c8cb62b..a9349f105 100644
--- a/grid2op/tests/test_ChronicsHandler.py
+++ b/grid2op/tests/test_ChronicsHandler.py
@@ -23,12 +23,12 @@
GridStateFromFileWithForecasts,
Multifolder,
GridValue,
+ MultifolderWithCache,
+ GridStateFromFileWithForecastsWithoutMaintenance
)
-from grid2op.Chronics import MultifolderWithCache
from grid2op.Backend import PandaPowerBackend
from grid2op.Parameters import Parameters
from grid2op.Rules import AlwaysLegal
-from grid2op.Chronics import GridStateFromFileWithForecastsWithoutMaintenance
from grid2op.Runner import Runner
import warnings
diff --git a/grid2op/tests/test_Converter.py b/grid2op/tests/test_Converter.py
index e57e6e5a2..58a6e758c 100644
--- a/grid2op/tests/test_Converter.py
+++ b/grid2op/tests/test_Converter.py
@@ -9,13 +9,12 @@
import warnings
import os
import json
-from grid2op.Action.BaseAction import BaseAction
+from grid2op.Action import BaseAction, PlayableAction
from grid2op.tests.helper_path_test import *
from grid2op.MakeEnv import make
from grid2op.Parameters import Parameters
from grid2op.Converter import ConnectivityConverter, IdToAct
-from grid2op.Action import PlayableAction
import tempfile
import pdb
diff --git a/grid2op/tests/test_GridObjects.py b/grid2op/tests/test_GridObjects.py
index c405afcc2..d606e7fe3 100644
--- a/grid2op/tests/test_GridObjects.py
+++ b/grid2op/tests/test_GridObjects.py
@@ -14,7 +14,7 @@
import warnings
import grid2op
-from grid2op.Backend.EducPandaPowerBackend import EducPandaPowerBackend
+from grid2op.Backend.educPandaPowerBackend import EducPandaPowerBackend
from grid2op.Exceptions import EnvError
diff --git a/grid2op/tests/test_GymConverter.py b/grid2op/tests/test_GymConverter.py
index 03509c153..dce5b1399 100644
--- a/grid2op/tests/test_GymConverter.py
+++ b/grid2op/tests/test_GymConverter.py
@@ -9,7 +9,7 @@
# TODO test the json part but... https://github.com/openai/gym-http-api/issues/62 or https://github.com/openai/gym/issues/1841
import tempfile
import json
-from grid2op.gym_compat.discrete_gym_actspace import DiscreteActSpace
+from grid2op.gym_compat import (DiscreteActSpace, GymActionSpace, GymObservationSpace, GymEnv, ContinuousToDiscreteConverter)
from grid2op.tests.helper_path_test import *
from grid2op.Action import PlayableAction
@@ -17,10 +17,6 @@
from grid2op.tests.helper_path_test import *
from grid2op.MakeEnv import make
from grid2op.Converter import IdToAct, ToVect
-from grid2op.gym_compat import GymActionSpace, GymObservationSpace
-from grid2op.gym_compat import GymEnv
-from grid2op.gym_compat import ContinuousToDiscreteConverter
-
import pdb
import warnings
diff --git a/grid2op/tests/test_RewardAlertCostScore.py b/grid2op/tests/test_RewardAlertCostScore.py
new file mode 100644
index 000000000..160872fa6
--- /dev/null
+++ b/grid2op/tests/test_RewardAlertCostScore.py
@@ -0,0 +1,179 @@
+# Copyright (c) 2023, RTE (https://www.rte-france.com)
+# See AUTHORS.txt
+# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
+# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
+# you can obtain one at http://mozilla.org/MPL/2.0/.
+# SPDX-License-Identifier: MPL-2.0
+# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
+
+import warnings
+import numpy as np
+import unittest
+import tempfile
+import grid2op
+from grid2op.Reward import _AlertCostScore, _AlertTrustScore
+from grid2op.Agent import DoNothingAgent, BaseAgent
+from grid2op.tests.helper_path_test import *
+from grid2op.Exceptions import Grid2OpException
+from grid2op.Runner import Runner
+from grid2op.Observation import BaseObservation
+from grid2op.Episode import EpisodeData
+from grid2op.Parameters import Parameters
+from grid2op.Opponent import BaseOpponent, GeometricOpponent
+from grid2op.Action import BaseAction, PlayableAction
+from _aux_opponent_for_test_alerts import (_get_steps_attack,
+ TestOpponent
+ )
+
+ATTACKED_LINE = "48_50_136"
+
+class AlertAgent(BaseAgent):
+ def act(self, observation: BaseObservation, reward: float, done: bool = False) -> BaseAction:
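+        # at step 2, raise an alert on attackable line 0; otherwise defer to the parent class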
+ if observation.current_step == 2:
+ return self.action_space({"raise_alert": [0]})
+ return super().act(observation, reward, done)
+
+# TODO
+# Review these tests comprehensively if this reward is ever used again.
+# It was originally intended for the L2RPN 2023 competition but was eventually not selected,
+# and the tests were set aside at that stage of development.
+
+class TestAlertCostScore(unittest.TestCase):
+
+ def test_specs(self):
+ # test function without actual data
+ assert _AlertCostScore._penalization_fun(50) == -1.
+ assert _AlertCostScore._penalization_fun(80) == 0.
+ assert _AlertCostScore._penalization_fun(100) == 1.
+
+ def setUp(self) -> None:
+ self.env_nm = os.path.join(
+ PATH_DATA_TEST, "l2rpn_idf_2023_with_alert"
+ )
+
+ def tearDown(self) -> None:
+ return super().tearDown()
+
+ def test_assistant_reward_value_no_blackout_no_attack_no_alert(self) -> None :
+ """ When no blackout and no attack occur, and no alert is raised we expect a reward of 0
+ until the end of the episode where we get the max reward 1.
+
+ Raises:
+ Grid2OpException: raise an exception if an attack occur
+ """
+ with grid2op.make(
+ self.env_nm,
+ test=True,
+ difficulty="1",
+ reward_class=_AlertCostScore
+ ) as env:
+ env.seed(0)
+ env.reset()
+
+ done = False
+ for i in range(env.max_episode_duration()):
+ obs, reward, done, info = env.step(env.action_space())
+ if done:
+ assert reward == 1.
+ else:
+ assert reward == 0.
+
+
+class TestSimulate(unittest.TestCase):
+ def setUp(self) -> None:
+ self.env_nm = os.path.join(
+ PATH_DATA_TEST, "l2rpn_idf_2023_with_alert"
+ )
+ self.env = grid2op.make(self.env_nm, test=True, difficulty="1",
+ reward_class=_AlertCostScore)
+ self.env.seed(0)
+ return super().setUp()
+
+ def tearDown(self) -> None:
+ self.env.close()
+ return super().tearDown()
+
+ def test_simulate(self):
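+        # score-style rewards are not evaluated inside obs.simulate: the simulated reward
+        # stays 0. whatever the action taken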
+ obs = self.env.reset()
+ simO, simr, simd, simi = obs.simulate(self.env.action_space())
+ assert simr == 0.
+ assert not simd
+
+ go_act = self.env.action_space({"set_bus": {"generators_id": [(0, -1)]}})
+ simO, simr, simd, simi = obs.simulate(go_act)
+ assert simr == 0., f"{simr} vs 0."
+ assert simd
+
+ def test_simulated_env(self):
+ obs = self.env.reset()
+ f_env = obs.get_forecast_env()
+ forD = False
+ while not forD:
+ forO, forR, forD, forI = f_env.step(self.env.action_space())
+ assert forR == 0.
+
+ f_env = obs.get_forecast_env()
+ forD = False
+ go_act = self.env.action_space({"set_bus": {"generators_id": [(0, -1)]}})
+ while not forD:
+ forO, forR, forD, forI = f_env.step(go_act)
+ assert forR == 0.
+
+
+class TestRunnerAlertCost(unittest.TestCase):
+ def setUp(self) -> None:
+ self.env_nm = os.path.join(
+ PATH_DATA_TEST, "l2rpn_idf_2023_with_alert"
+ )
+ self.env = grid2op.make(self.env_nm, test=True, difficulty="1",
+ reward_class=_AlertCostScore)
+ self.env.seed(0)
+ return super().setUp()
+
+ def tearDown(self) -> None:
+ self.env.close()
+ return super().tearDown()
+
+ def test_dn_agent(self):
+ obs = self.env.reset()
+ runner = Runner(**self.env.get_params_for_runner())
+ res = runner.run(nb_episode=1, episode_id=[0], max_iter=10, env_seeds=[0])
+        assert res[0][2] == 1.  # a cumulated reward of 1. means the episode ran to its end
+
+ def test_simagent(self):
+        # simulate a blackout inside the agent but always act with "do nothing"
+ obs = self.env.reset()
+
+ class SimAgent(BaseAgent):
+ def act(self, observation: BaseObservation, reward: float, done: bool = False) -> BaseAction:
+ go_act = self.action_space({"set_bus": {"generators_id": [(0, -1)]}})
+                # simulate from the current observation, not from the stale one captured at reset
+                simO, simr, simd, simi = observation.simulate(go_act)
+                simO, simr, simd, simi = observation.simulate(self.action_space())
+ return super().act(observation, reward, done)
+
+ runner = Runner(**self.env.get_params_for_runner(),
+ agentClass=SimAgent)
+ res = runner.run(nb_episode=1, episode_id=[0], max_iter=10, env_seeds=[0])
+ assert res[0][2] == 1.
+
+ def test_episodeData(self):
+ obs = self.env.reset()
+ runner = Runner(**self.env.get_params_for_runner())
+ res = runner.run(nb_episode=1, episode_id=[0], max_iter=10, env_seeds=[0], add_detailed_output=True)
+ assert res[0][2] == 1.
+ assert res[0][5].rewards[8] == 1.
+
+ def test_with_save(self):
+ obs = self.env.reset()
+ runner = Runner(**self.env.get_params_for_runner())
+ with tempfile.TemporaryDirectory() as f:
+ res = runner.run(nb_episode=1, episode_id=[0], max_iter=10, env_seeds=[0],
+ path_save=f)
+ assert res[0][2] == 1.
+ ep0, *_ = EpisodeData.list_episode(f)
+ ep = EpisodeData.from_disk(*ep0)
+ assert ep.rewards[8] == 1.
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/grid2op/tests/test_RewardNewRenewableSourcesUsageScore.py b/grid2op/tests/test_RewardNewRenewableSourcesUsageScore.py
index bb4fe6b64..53b7974f0 100644
--- a/grid2op/tests/test_RewardNewRenewableSourcesUsageScore.py
+++ b/grid2op/tests/test_RewardNewRenewableSourcesUsageScore.py
@@ -42,6 +42,32 @@ def act(self, obs, reward, done):
sim_obs_1, *_ = obs.simulate(act, time_step=1)
return super().act(obs, reward, done)
+
+class TestJustGameOver(unittest.TestCase):
+ def setUp(self) -> None:
+ env_name = "l2rpn_case14_sandbox"
+ with warnings.catch_warnings():
+ warnings.filterwarnings("ignore")
+ self.env = grid2op.make(env_name,
+ reward_class=_NewRenewableSourcesUsageScore,
+ test=True
+ )
+ self.env.set_max_iter(20)
+ self.env.parameters.NO_OVERFLOW_DISCONNECTION = True
+ self.nres_id = np.arange(self.env.n_gen)[self.env.gen_renewable]
+
+ def tearDown(self) -> None:
+ self.env.close()
+ return super().tearDown()
+
+ def test_when_no_step(self):
+ obs = self.env.reset()
+ with warnings.catch_warnings():
+ warnings.filterwarnings("error")
+ obs, reward, done, info = self.env.step(self.env.action_space({"set_bus": {"loads_id": [(0, -1)]}}))
+ assert done
+ assert reward == 1., f"{reward:.2f} vs 1."
+
class TestNewRenewableSourcesUsageScore(unittest.TestCase):
def setUp(self) -> None:
@@ -148,6 +174,22 @@ def test_simulate_ignored(self):
break
return reward == 1.
+
+ def test_simulate_blackout_ignored(self):
+ obs = self.env.reset()
+ obs, reward, done, _ = self.env.step(self.env.action_space())
+ go_act = self.env.action_space({"set_bus": {"generators_id": [(0, -1)]}})
+ simO, simr, simd, simi = obs.simulate(go_act)
+ assert simr == 0., f"{simr} vs 0."
+ assert simd
+
+ def test_simulated_env(self):
+ obs = self.env.reset()
+ f_env = obs.get_forecast_env()
+ forD = False
+ while not forD:
+ forO, forR, forD, forI = f_env.step(self.env.action_space())
+ assert forR == 0.
if __name__ == "__main__":
diff --git a/grid2op/tests/test_Runner.py b/grid2op/tests/test_Runner.py
index 6dee798ae..6f5152ba6 100644
--- a/grid2op/tests/test_Runner.py
+++ b/grid2op/tests/test_Runner.py
@@ -497,6 +497,8 @@ def test_backward_compatibility(self):
"1.7.1",
"1.7.2",
"1.8.1",
+ # "1.9.0", # this one is bugy I don"t know why
+ "1.9.1",
]
curr_version = "test_version"
assert (
@@ -517,8 +519,10 @@ def test_backward_compatibility(self):
agent_seeds=[42, 69],
)
# check that i can read this data generate for this runner
- self._aux_backward(path, curr_version, curr_version)
-
+ try:
+ self._aux_backward(path, curr_version, curr_version)
+ except Exception as exc_:
+ raise RuntimeError(f"error for {curr_version}") from exc_
assert (
"curtailment" in CompleteObservation.attr_list_vect
), "error after the first runner"
@@ -529,17 +533,20 @@ def test_backward_compatibility(self):
self._aux_backward(
PATH_PREVIOUS_RUNNER, f"res_agent_{grid2op_version}", grid2op_version
)
-
+
for grid2op_version in backward_comp_version:
# check that i can read previous data stored from previous grid2Op version
# can be loaded properly
with warnings.catch_warnings():
warnings.filterwarnings("ignore")
- self._aux_backward(
- PATH_PREVIOUS_RUNNER,
- f"res_agent_{grid2op_version}",
- grid2op_version,
- )
+ try:
+ self._aux_backward(
+ PATH_PREVIOUS_RUNNER,
+ f"res_agent_{grid2op_version}",
+ grid2op_version,
+ )
+ except Exception as exc_:
+ raise RuntimeError(f"error for {grid2op_version}") from exc_
assert "curtailment" in CompleteObservation.attr_list_vect, (
f"error after the legacy version " f"{grid2op_version}"
)
diff --git a/grid2op/tests/test_alert_feature.py b/grid2op/tests/test_alert_obs_act.py
similarity index 89%
rename from grid2op/tests/test_alert_feature.py
rename to grid2op/tests/test_alert_obs_act.py
index 9d4d2e178..e2fbc9dfa 100644
--- a/grid2op/tests/test_alert_feature.py
+++ b/grid2op/tests/test_alert_obs_act.py
@@ -10,21 +10,15 @@
import numpy as np
import unittest
import os
-import copy
-import tempfile
from grid2op.Observation import BaseObservation
from grid2op.tests.helper_path_test import *
from grid2op import make
from grid2op.Reward import AlertReward
-from grid2op.Parameters import Parameters
-from grid2op.Exceptions import Grid2OpException
from grid2op.Runner import Runner # TODO
-from grid2op.Opponent import BaseOpponent, GeometricOpponent
-from grid2op.Action import BaseAction, PlayableAction
-from grid2op.Agent import BaseAgent
-from grid2op.Episode import EpisodeData
+from grid2op.Action import PlayableAction
+from _aux_opponent_for_test_alerts import OpponentForTestAlert
ALL_ATTACKABLE_LINES = [
"62_58_180",
@@ -40,60 +34,11 @@
]
-def _get_steps_attack(kwargs_opponent, multi=False):
- """computes the steps for which there will be attacks"""
- ts_attack = np.array(kwargs_opponent["steps_attack"])
- res = []
- for i, ts in enumerate(ts_attack):
- if not multi:
- res.append(ts + np.arange(kwargs_opponent["duration"]))
- else:
- res.append(ts + np.arange(kwargs_opponent["duration"][i]))
- return np.unique(np.concatenate(res).flatten())
-
-
-class OpponentForTestAlert(BaseOpponent):
- """An opponent that can select the line attack, the time and duration of the attack."""
-
- def __init__(self, action_space):
- super().__init__(action_space)
- self.env = None
- self.lines_attacked = None
- self.custom_attack = None
- self.attack_duration = None
- self.attack_steps = None
- self.attack_id = None
-
- def _custom_deepcopy_for_copy(self, new_obj, dict_=None):
- new_obj.env = dict_["partial_env"]
- new_obj.lines_attacked = copy.deepcopy(self.lines_attacked)
- new_obj.custom_attack = [act.copy() for act in self.custom_attack]
- new_obj.attack_duration = copy.deepcopy(self.attack_duration)
- new_obj.attack_steps = copy.deepcopy(self.attack_steps)
- new_obj.attack_id = copy.deepcopy(self.attack_id)
- return super()._custom_deepcopy_for_copy(new_obj, dict_)
-
- def init(self,
- partial_env,
- lines_attacked=ALL_ATTACKABLE_LINES,
- attack_duration=[],
- attack_steps=[],
- attack_id=[]):
- self.lines_attacked = lines_attacked
- self.custom_attack = [ self.action_space({"set_line_status" : [(l, -1)]}) for l in attack_id]
- self.attack_duration = attack_duration
- self.attack_steps = attack_steps
- self.attack_id = attack_id
- self.env = partial_env
-
- def attack(self, observation, agent_action, env_action, budget, previous_fails):
- if observation is None:
- return None, None
- current_step = self.env.nb_time_step
- if current_step not in self.attack_steps:
- return None, None
- index = self.attack_steps.index(current_step)
- return self.custom_attack[index], self.attack_duration[index]
+DEFAULT_ALERT_REWARD_PARAMS = dict(reward_min_no_blackout=-1.0,
+ reward_min_blackout=-10.0,
+ reward_max_no_blackout=1.0,
+ reward_max_blackout=2.0,
+ reward_end_episode_bonus=42.0)
# Test alert blackout / test alert no blackout
@@ -117,7 +62,7 @@ def setUp(self) -> None:
opponent_action_class=PlayableAction,
opponent_class=OpponentForTestAlert,
kwargs_opponent=kwargs_opponent,
- reward_class=AlertReward(reward_end_episode_bonus=42),
+ reward_class=AlertReward(**DEFAULT_ALERT_REWARD_PARAMS),
_add_to_name="_tafta")
def tearDown(self) -> None:
@@ -232,7 +177,7 @@ def setUp(self) -> None:
opponent_action_class=PlayableAction,
opponent_class=OpponentForTestAlert,
kwargs_opponent=kwargs_opponent,
- reward_class=AlertReward(reward_end_episode_bonus=42),
+ reward_class=AlertReward(**DEFAULT_ALERT_REWARD_PARAMS),
_add_to_name="_tafto")
param = self.env.parameters
param.ALERT_TIME_WINDOW = 2
@@ -254,6 +199,35 @@ def _aux_obs_init(self, obs):
def test_init_observation(self) -> None :
obs : BaseObservation = self.env.reset()
self._aux_obs_init(obs)
+
+ def test_reset_obs(self) -> None :
+ obs1 : BaseObservation = self.env.reset()
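+        # time_since_last_alert is -1 for each attackable line on which no alert has been raised yet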
+ assert (obs1.time_since_last_alert == np.array([-1, -1, -1, -1, -1, -1, -1, -1, -1, -1])).all()
+
+ obs2, reward, done, info = self.env.step(self.env.action_space({"raise_alert": [0]}))
+ assert (obs2.time_since_last_alert == np.array([0, -1, -1, -1, -1, -1, -1, -1, -1, -1])).all()
+
+ obs2bis, reward, done, info = self.env.step(self.env.action_space({"raise_alert": [1]}))
+ assert (obs2bis.time_since_last_alert == np.array([1, 0, -1, -1, -1, -1, -1, -1, -1, -1])).all()
+
+ obs3 : BaseObservation = self.env.reset()
+ assert (obs3.time_since_last_alert == obs1.time_since_last_alert).all()
+
+ def test_reset_reward(self) -> None :
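+        # the internal state of AlertReward (step counter and raised-alert buffer) must be
+        # reset together with the environment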
+ obs1 : BaseObservation = self.env.reset()
+ assert self.env._reward_helper.template_reward._current_id == 0
+
+ obs2, reward, done, info = self.env.step(self.env.action_space({"raise_alert": [0]}))
+ assert self.env._reward_helper.template_reward._current_id == 1
+ assert self.env._reward_helper.template_reward._alert_launched.sum() == 1
+
+ obs, reward, done, info = self.env.step(self.env.action_space({"raise_alert": [1]}))
+ assert self.env._reward_helper.template_reward._current_id == 2
+ assert self.env._reward_helper.template_reward._alert_launched.sum() == 2
+
+ obs3 : BaseObservation = self.env.reset()
+ assert self.env._reward_helper.template_reward._current_id == 0
+ assert self.env._reward_helper.template_reward._alert_launched.sum() == 0
def _aux_alert_0(self, obs):
assert obs.active_alert[0]
diff --git a/grid2op/tests/test_alert_trust_score.py b/grid2op/tests/test_alert_trust_score.py
new file mode 100644
index 000000000..58a18e48d
--- /dev/null
+++ b/grid2op/tests/test_alert_trust_score.py
@@ -0,0 +1,1885 @@
+# Copyright (c) 2019-2023, RTE (https://www.rte-france.com)
+# See AUTHORS.txt
+# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
+# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
+# you can obtain one at http://mozilla.org/MPL/2.0/.
+# SPDX-License-Identifier: MPL-2.0
+# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
+
+import warnings
+import numpy as np
+import unittest
+import os
+import tempfile
+from grid2op.Observation import BaseObservation
+from grid2op.tests.helper_path_test import *
+
+from grid2op import make
+from grid2op.Reward import _AlertTrustScore
+from grid2op.Parameters import Parameters
+from grid2op.Exceptions import Grid2OpException
+from grid2op.Runner import Runner
+from grid2op.Action import BaseAction, PlayableAction
+from grid2op.Agent import BaseAgent
+from grid2op.Episode import EpisodeData
+
+from _aux_opponent_for_test_alerts import (_get_steps_attack,
+ TestOpponent,
+ TestOpponentMultiLines,
+ _get_blackout)
+
+ATTACKED_LINE = "48_50_136"
+
+DEFAULT_PARAMS_TRUSTSCORE = dict(reward_min_no_blackout=-1.0,
+ reward_min_blackout=-50.0,
+ reward_max_no_blackout=0.0,
+ reward_max_blackout=0.0,
+ reward_end_episode_bonus=0.0,
+ min_score=-3.0)
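+# with these values the cumulated reward of an episode is at most 0: all "max" rewards and
+# the end-of-episode bonus are set to 0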
+
+# A near copy of the _normalisation_fun function from _AlertTrustScore. Use it when, for a given
+# trust score parametrization, the score is not easy to guess beforehand for a given scenario,
+# especially when reward_end_episode_bonus is non null in some non-blackout cases.
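+# example with hypothetical values: cm_reward=-1., cm_reward_min_ep=-1., cm_reward_max_ep=0.
+# and max_score=1. give a standardized score of 0., hence manual_score == min_score == -3.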
+def manual_score(cm_reward, cm_reward_min_ep, cm_reward_max_ep, max_score):
+    manual_standardized_score = np.round(
+        (cm_reward - cm_reward_min_ep) / (cm_reward_max_ep - cm_reward_min_ep + 1e-5), 4
+    )
+    return DEFAULT_PARAMS_TRUSTSCORE["min_score"] + (
+        max_score - DEFAULT_PARAMS_TRUSTSCORE["min_score"]
+    ) * manual_standardized_score
+
+# Test alertTrustScore when no blackout and when blackout
+class TestAlertTrustScoreNoBlackout(unittest.TestCase):
+ """test the basic behavior of the assistant alert feature when no blackout occur """
+
+ def setUp(self) -> None:
+ """ WARNING: Parameter ALERT_TIME_WINDOW should be set to 2 in these test for the environment used
+ Max Iter should be set to 10
+ """
+ self.env_nm = os.path.join(
+ PATH_DATA_TEST, "l2rpn_idf_2023_with_alert"
+ )
+
+    # this is the no-blackout test case where the score reaches its maximum
+ def test_assistant_trust_score_no_blackout_no_attack_no_alert(self) -> None :
+ """ When no blackout and no attack occur, and no alert is raised we expect a maximum score
+ at the end of the episode and cumulated reward equal to the end of episode bonus
+
+ Raises:
+ Grid2OpException: raise an exception if an attack occur
+ """
+ with make(
+ self.env_nm,
+ test=True,
+ difficulty="1",
+ reward_class=_AlertTrustScore(**DEFAULT_PARAMS_TRUSTSCORE)
+ ) as env:
+ env.seed(0)
+ env.reset()
+
+ done = False
+ for i in range(env.max_episode_duration()):
+ obs, score, done, info = env.step(env.action_space())
+ if info["opponent_attack_line"] is None :
+ if i == env.max_episode_duration()-1:
+ total_nb_attacks = env._reward_helper.template_reward.total_nb_attacks
+                        nb_last_attacks = env._reward_helper.template_reward.nb_last_attacks
+                        assert total_nb_attacks == 0
+                        assert nb_last_attacks == 0
+                        assert env._reward_helper.template_reward.cumulated_reward == DEFAULT_PARAMS_TRUSTSCORE["reward_end_episode_bonus"]
+
+ cm_reward_min_ep, cm_reward_max_ep = env._reward_helper.template_reward._compute_min_max_reward(
+ total_nb_attacks,nb_last_attacks)
+ assert cm_reward_min_ep == 0.
+ assert cm_reward_max_ep == DEFAULT_PARAMS_TRUSTSCORE["reward_end_episode_bonus"]
+
+ assert score == env._reward_helper.template_reward.max_score
+ else :
+ assert score == 0
+ else :
+ raise Grid2OpException('No attack expected')
+
+ assert done
+
+    # this is the no-blackout test case where the score reaches its minimum
+ def test_assistant_trust_score_no_blackout_attack_alert(self) -> None :
+ """When we raise an alert for an attack (at step 1)
+ and no blackout occur, we expect a minimum score
+ at the end of the episode if end of episode bonus is null (or above otherwise), a cumulated reward equal to reward_min_no_blackout + end of episode bonus.
+ score is otherwise 0 at other time steps
+ """
+ kwargs_opponent = dict(lines_attacked=[ATTACKED_LINE],
+ duration=3,
+ steps_attack=[2])
+ with make(self.env_nm,
+ test=True,
+ difficulty="1",
+ opponent_attack_cooldown=0,
+ opponent_attack_duration=99999,
+ opponent_budget_per_ts=1000,
+ opponent_init_budget=10000.,
+ opponent_action_class=PlayableAction,
+ opponent_class=TestOpponent,
+ kwargs_opponent=kwargs_opponent,
+ reward_class=_AlertTrustScore(**DEFAULT_PARAMS_TRUSTSCORE),
+ _add_to_name="_tatsnba"
+ ) as env :
+ env.seed(0)
+ env.reset()
+ step = 0
+ for i in range(env.max_episode_duration()):
+ attackable_line_id = 0
+ act = env.action_space()
+ if i == 1 :
+ act = env.action_space({"raise_alert": [attackable_line_id]})
+ obs, score, done, info = env.step(act)
+ step += 1
+ if step in _get_steps_attack(kwargs_opponent):
+ assert info["opponent_attack_line"] is not None, f"no attack is detected at step {step}"
+ else:
+ assert info["opponent_attack_line"] is None, f"an attack is detected at step {step}"
+
+ if done:
+
+ total_nb_attacks = env._reward_helper.template_reward.total_nb_attacks
+ nb_last_attacks = env._reward_helper.template_reward.nb_last_attacks
+
+ assert nb_last_attacks == 0
+ assert total_nb_attacks == 1
+
+                    cm_reward = env._reward_helper.template_reward.cumulated_reward
+                    assert cm_reward == DEFAULT_PARAMS_TRUSTSCORE["reward_end_episode_bonus"] + DEFAULT_PARAMS_TRUSTSCORE["reward_min_no_blackout"]
+ cm_reward_min_ep, cm_reward_max_ep = env._reward_helper.template_reward._compute_min_max_reward(
+ total_nb_attacks,nb_last_attacks)
+ assert cm_reward_min_ep == DEFAULT_PARAMS_TRUSTSCORE["reward_min_no_blackout"]
+ assert cm_reward_max_ep == DEFAULT_PARAMS_TRUSTSCORE["reward_end_episode_bonus"]+DEFAULT_PARAMS_TRUSTSCORE["reward_max_no_blackout"]
+
+ assert score == DEFAULT_PARAMS_TRUSTSCORE["min_score"] # because reward_end_episode_bonus == 0
+                    # Can be used if reward_end_episode_bonus != 0:
+ # assert score > DEFAULT_PARAMS_TRUSTSCORE["min_score"]
+ # assert score == manual_score (cm_reward,cm_reward_min_ep,cm_reward_max_ep,env._reward_helper.template_reward.max_score)
+
+ else :
+ assert score == 0
+
+    # this is the no-blackout test case where the score lands in the middle (mean score)
+ def test_assistant_trust_score_no_blackout_2_attack_same_time_1_alert(self) -> None:
+ """ When we raise only 1 alert for 2 attacks at the same time (step 2) (considered as a single attack event)
+ but no blackout occur, we expect a mean score
+ at the end of the episode if no end of episode bonus,
+ a cumulated reward equal to (reward_max_no_blackout + reward_min_no_blackout)/2 end of episode bonus.
+ score is otherwise 0 at other time steps
+ """
+ kwargs_opponent = dict(lines_attacked=[ATTACKED_LINE] + ['48_53_141'],
+ duration=3,
+ steps_attack=[2])
+ with make(self.env_nm,
+ test=True,
+ difficulty="1",
+ reward_class=_AlertTrustScore(**DEFAULT_PARAMS_TRUSTSCORE),
+ opponent_attack_cooldown=0,
+ opponent_attack_duration=99999,
+ opponent_budget_per_ts=1000,
+ opponent_init_budget=10000.,
+ opponent_action_class=PlayableAction,
+ opponent_class=TestOpponent,
+ kwargs_opponent=kwargs_opponent,
+ _add_to_name="_tatsnb2ast1a"
+ ) as env:
+ env.seed(0)
+ env.reset()
+ step = 0
+ for i in range(env.max_episode_duration()):
+ attackable_line_id = 0
+ act = env.action_space()
+ if step == 1:
+ act = env.action_space({"raise_alert": [attackable_line_id]})
+ obs, score, done, info = env.step(act)
+ step += 1
+ if step in _get_steps_attack(kwargs_opponent):
+ assert info["opponent_attack_line"] is not None, f"no attack is detected at step {step}"
+ else:
+ assert info["opponent_attack_line"] is None, f"an attack is detected at step {step}"
+
+ if done:
+ total_nb_attacks = env._reward_helper.template_reward.total_nb_attacks
+ nb_last_attacks = env._reward_helper.template_reward.nb_last_attacks
+
+ assert nb_last_attacks == 0
+                    assert total_nb_attacks == 1  # two simultaneous attacks are considered a single attack event
+
+ cm_reward = env._reward_helper.template_reward.cumulated_reward
+ assert env._reward_helper.template_reward.cumulated_reward == (
+ DEFAULT_PARAMS_TRUSTSCORE["reward_min_no_blackout"] + \
+ DEFAULT_PARAMS_TRUSTSCORE["reward_max_no_blackout"]) / 2 + \
+ DEFAULT_PARAMS_TRUSTSCORE["reward_end_episode_bonus"]
+
+ cm_reward_min_ep, cm_reward_max_ep = env._reward_helper.template_reward._compute_min_max_reward(
+ total_nb_attacks, nb_last_attacks)
+ assert cm_reward_min_ep == DEFAULT_PARAMS_TRUSTSCORE[
+ "reward_min_no_blackout"] * total_nb_attacks
+ assert cm_reward_max_ep == DEFAULT_PARAMS_TRUSTSCORE["reward_end_episode_bonus"] + \
+ DEFAULT_PARAMS_TRUSTSCORE["reward_max_no_blackout"] * total_nb_attacks
+
+ max_score = env._reward_helper.template_reward.max_score
+ mean_score = (max_score + DEFAULT_PARAMS_TRUSTSCORE["min_score"]) / 2
+ assert score == mean_score # because reward_end_episode_bonus == 0
+                    # Can be used if reward_end_episode_bonus != 0:
+ # assert score > mean_score
+ # assert score == manual_score (cm_reward,cm_reward_min_ep,cm_reward_max_ep,env._reward_helper.template_reward.max_score)
+ else:
+ assert score == 0
+
+ def test_assistant_trust_score_no_blackout_no_attack_alert(self) -> None:
+ """ When an alert is raised while no attack / nor blackout occur, we expect a maximum score
+ at the end of the episode and cumulated reward equal to the end of episode bonus
+
+ Raises:
+ Grid2OpException: raise an exception if an attack occur
+ """
+ with make(
+ self.env_nm,
+ test=True,
+ difficulty="1",
+ reward_class=_AlertTrustScore(**DEFAULT_PARAMS_TRUSTSCORE)
+ ) as env:
+ env.seed(0)
+ env.reset()
+
+ done = False
+ attackable_line_id = 0
+ step = 0
+ for i in range(env.max_episode_duration()):
+ act = env.action_space()
+ if step == 1:
+ act = env.action_space({"raise_alert": [attackable_line_id]})
+ obs, score, done, info = env.step(act)
+ step += 1
+
+ if info["opponent_attack_line"] is None:
+ if step == env.max_episode_duration():
+ total_nb_attacks = env._reward_helper.template_reward.total_nb_attacks
+ nb_last_attacks = env._reward_helper.template_reward.nb_last_attacks
+ assert total_nb_attacks == 0
+ assert nb_last_attacks == 0
+
+ assert env._reward_helper.template_reward.cumulated_reward == DEFAULT_PARAMS_TRUSTSCORE[
+ "reward_end_episode_bonus"]
+ cm_reward_min_ep, cm_reward_max_ep = env._reward_helper.template_reward._compute_min_max_reward(
+ total_nb_attacks, nb_last_attacks)
+ assert cm_reward_min_ep == 0.
+ assert cm_reward_max_ep == DEFAULT_PARAMS_TRUSTSCORE["reward_end_episode_bonus"]
+
+ assert score == env._reward_helper.template_reward.max_score
+ else:
+ assert score == 0
+ else:
+ raise Grid2OpException('No attack expected')
+
+ assert done
+
+    # test cases with an attack
+ def test_assistant_trust_score_no_blackout_attack_no_alert(self) -> None:
+ """ When we don't raise an alert for an attack (at step 1)
+ and no blackout occur, we expect a maximum score
+ at the end of the episode, a cumulated reward equal to reward_max_no_blackout + end of episode bonus.
+ score is otherwise 0 at other time steps
+
+ """
+ kwargs_opponent = dict(lines_attacked=[ATTACKED_LINE],
+ duration=3,
+ steps_attack=[1])
+ with make(self.env_nm,
+ test=True,
+ difficulty="1",
+ opponent_attack_cooldown=0,
+ opponent_attack_duration=99999,
+ opponent_budget_per_ts=1000,
+ opponent_init_budget=10000.,
+ opponent_action_class=PlayableAction,
+ opponent_class=TestOpponent,
+ kwargs_opponent=kwargs_opponent,
+ reward_class=_AlertTrustScore(**DEFAULT_PARAMS_TRUSTSCORE),
+ _add_to_name="_tatsnbana"
+ ) as env:
+ env.seed(0)
+ env.reset()
+ step = 0
+ for i in range(env.max_episode_duration()):
+ act = env.action_space()
+ obs, score, done, info = env.step(act)
+ step += 1
+ if step in _get_steps_attack(kwargs_opponent):
+ assert info["opponent_attack_line"] is not None, f"no attack is detected at step {step}"
+ else:
+ assert info["opponent_attack_line"] is None, f"an attack is detected at step {step}"
+
+ if done:
+ assert np.round(score, 3) == env._reward_helper.template_reward.max_score
+ total_nb_attacks = env._reward_helper.template_reward.total_nb_attacks
+ nb_last_attacks = env._reward_helper.template_reward.nb_last_attacks
+
+ assert nb_last_attacks == 0
+ assert total_nb_attacks == 1
+ assert env._reward_helper.template_reward.cumulated_reward == DEFAULT_PARAMS_TRUSTSCORE[
+ "reward_end_episode_bonus"] + DEFAULT_PARAMS_TRUSTSCORE["reward_max_no_blackout"]
+ cm_reward_min_ep, cm_reward_max_ep = env._reward_helper.template_reward._compute_min_max_reward(
+ total_nb_attacks, nb_last_attacks)
+ assert cm_reward_min_ep == DEFAULT_PARAMS_TRUSTSCORE["reward_min_no_blackout"]
+ assert cm_reward_max_ep == DEFAULT_PARAMS_TRUSTSCORE["reward_end_episode_bonus"] + \
+ DEFAULT_PARAMS_TRUSTSCORE["reward_max_no_blackout"]
+ else:
+ assert score == 0
+
+ def test_assistant_trust_score_no_blackout_attack_alert_too_late(self) -> None :
+ """ When we raise an alert too late for an attack (at step 2) but no blackout occur,
+ we expect a maximum score at the end of the episode,
+ a cumulated reward equal to reward_max_no_blackout + end of episode bonus.
+ score is otherwise 0 at other time steps
+
+
+ """
+ kwargs_opponent = dict(lines_attacked=[ATTACKED_LINE],
+ duration=3,
+ steps_attack=[2])
+ with make(self.env_nm,
+ test=True,
+ difficulty="1",
+ reward_class=_AlertTrustScore(**DEFAULT_PARAMS_TRUSTSCORE),
+ opponent_attack_cooldown=0,
+ opponent_attack_duration=99999,
+ opponent_budget_per_ts=1000,
+ opponent_init_budget=10000.,
+ opponent_action_class=PlayableAction,
+ opponent_class=TestOpponent,
+ kwargs_opponent=kwargs_opponent,
+ _add_to_name="_tatsnbaatl"
+ ) as env :
+ env.seed(0)
+ env.reset()
+ step = 0
+ for i in range(env.max_episode_duration()):
+ attackable_line_id = 0
+ act = env.action_space()
+ if step == 2 :
+ act = env.action_space({"raise_alert": [attackable_line_id]})
+ obs, score, done, info = env.step(act)
+ step += 1
+ if step in _get_steps_attack(kwargs_opponent):
+ assert info["opponent_attack_line"] is not None, f"no attack is detected at step {step}"
+ else:
+ assert info["opponent_attack_line"] is None, f"an attack is detected at step {step}"
+
+ if done:
+ assert score == env._reward_helper.template_reward.max_score
+ total_nb_attacks = env._reward_helper.template_reward.total_nb_attacks
+ nb_last_attacks = env._reward_helper.template_reward.nb_last_attacks
+
+ assert nb_last_attacks == 0
+ assert total_nb_attacks == 1
+
+                    assert env._reward_helper.template_reward.cumulated_reward == DEFAULT_PARAMS_TRUSTSCORE["reward_end_episode_bonus"] + \
+                        DEFAULT_PARAMS_TRUSTSCORE["reward_max_no_blackout"]
+ cm_reward_min_ep, cm_reward_max_ep = env._reward_helper.template_reward._compute_min_max_reward(
+ total_nb_attacks,nb_last_attacks)
+ assert cm_reward_min_ep == DEFAULT_PARAMS_TRUSTSCORE["reward_min_no_blackout"]
+ assert cm_reward_max_ep == DEFAULT_PARAMS_TRUSTSCORE["reward_end_episode_bonus"]+DEFAULT_PARAMS_TRUSTSCORE["reward_max_no_blackout"]
+ else :
+ assert score == 0
+
+    def test_assistant_trust_score_no_blackout_attack_alert_too_early(self) -> None :
+        """ When we raise an alert too early (at step 0) for an attack happening at step 2,
+            we expect a maximum score at the end of the episode and a cumulated reward
+            equal to reward_max_no_blackout + end-of-episode bonus.
+            The score is 0 at all other time steps.
+        """
+ kwargs_opponent = dict(lines_attacked=[ATTACKED_LINE],
+ duration=3,
+ steps_attack=[2])
+ with make(self.env_nm,
+ test=True,
+ difficulty="1",
+ reward_class=_AlertTrustScore(**DEFAULT_PARAMS_TRUSTSCORE),
+ opponent_attack_cooldown=0,
+ opponent_attack_duration=99999,
+ opponent_budget_per_ts=1000,
+ opponent_init_budget=10000.,
+ opponent_action_class=PlayableAction,
+ opponent_class=TestOpponent,
+ kwargs_opponent=kwargs_opponent,
+ _add_to_name="_tatsnbaate"
+ ) as env :
+ env.seed(0)
+ env.reset()
+ step = 0
+ for i in range(env.max_episode_duration()):
+ attackable_line_id = 0
+ act = env.action_space()
+ if step == 0 :
+ # An alert is raised at step 0
+ act = env.action_space({"raise_alert": [attackable_line_id]})
+ obs, score, done, info = env.step(act)
+ step += 1
+ if step in _get_steps_attack(kwargs_opponent):
+ assert info["opponent_attack_line"] is not None, f"no attack is detected at step {step}"
+ else:
+ assert info["opponent_attack_line"] is None, f"an attack is detected at step {step}"
+
+ if done:
+ assert score == env._reward_helper.template_reward.max_score
+ total_nb_attacks = env._reward_helper.template_reward.total_nb_attacks
+ nb_last_attacks = env._reward_helper.template_reward.nb_last_attacks
+
+ assert nb_last_attacks == 0
+ assert total_nb_attacks == 1
+
+                    assert env._reward_helper.template_reward.cumulated_reward == DEFAULT_PARAMS_TRUSTSCORE["reward_end_episode_bonus"] + \
+                        DEFAULT_PARAMS_TRUSTSCORE["reward_max_no_blackout"]
+ cm_reward_min_ep, cm_reward_max_ep = env._reward_helper.template_reward._compute_min_max_reward(
+ total_nb_attacks,nb_last_attacks)
+ assert cm_reward_min_ep == DEFAULT_PARAMS_TRUSTSCORE["reward_min_no_blackout"]
+ assert cm_reward_max_ep == DEFAULT_PARAMS_TRUSTSCORE["reward_end_episode_bonus"] + \
+ DEFAULT_PARAMS_TRUSTSCORE["reward_max_no_blackout"]
+
+ else :
+ assert score == 0
+
+    # 2 attacked lines
+ def test_assistant_trust_score_no_blackout_2_attack_same_time_no_alert(self) -> None :
+ """ When we don't raise an alert for 2 attacks at the same time (step 1) (considered as a single attack event)
+ but no blackout occur, we expect a maximum score
+ at the end of the episode, a cumulated reward equal to reward_max_no_blackout + end of episode bonus.
+ score is otherwise 0 at other time steps
+
+ """
+
+ kwargs_opponent = dict(lines_attacked=[ATTACKED_LINE]+['48_53_141'],
+ duration=3,
+ steps_attack=[1])
+ with make(self.env_nm,
+ test=True,
+ difficulty="1",
+ opponent_attack_cooldown=0,
+ opponent_attack_duration=99999,
+ opponent_budget_per_ts=1000,
+ opponent_init_budget=10000.,
+ opponent_action_class=PlayableAction,
+ opponent_class=TestOpponent,
+ kwargs_opponent=kwargs_opponent,
+ reward_class=_AlertTrustScore(**DEFAULT_PARAMS_TRUSTSCORE),
+ _add_to_name="_tatsnb2astna"
+ ) as env :
+ env.seed(0)
+ env.reset()
+ step = 0
+ for i in range(env.max_episode_duration()):
+ act = env.action_space()
+ obs, score, done, info = env.step(act)
+ step += 1
+ if step in _get_steps_attack(kwargs_opponent):
+ assert info["opponent_attack_line"] is not None, f"no attack is detected at step {step}"
+ else:
+ assert info["opponent_attack_line"] is None, f"an attack is detected at step {step}"
+
+ if done:
+ total_nb_attacks = env._reward_helper.template_reward.total_nb_attacks
+ nb_last_attacks = env._reward_helper.template_reward.nb_last_attacks
+
+ assert nb_last_attacks == 0
+                    assert total_nb_attacks == 1  # 1 because two simultaneous attacks are considered as a single attack event
+
+ assert env._reward_helper.template_reward.cumulated_reward==DEFAULT_PARAMS_TRUSTSCORE["reward_end_episode_bonus"] +\
+ DEFAULT_PARAMS_TRUSTSCORE["reward_max_no_blackout"]*total_nb_attacks
+ cm_reward_min_ep, cm_reward_max_ep = env._reward_helper.template_reward._compute_min_max_reward(
+ total_nb_attacks,nb_last_attacks)
+ assert cm_reward_min_ep == DEFAULT_PARAMS_TRUSTSCORE["reward_min_no_blackout"]*total_nb_attacks
+ assert cm_reward_max_ep == DEFAULT_PARAMS_TRUSTSCORE["reward_end_episode_bonus"] + \
+ DEFAULT_PARAMS_TRUSTSCORE["reward_max_no_blackout"]*total_nb_attacks
+
+ assert score == env._reward_helper.template_reward.max_score
+ else :
+ assert score == 0
+
+ def test_assistant_trust_score_no_blackout_2_attack_same_time_2_alert(self) -> None :
+        """ When we raise 2 alerts for 2 attacks happening at the same time (step 2, considered as a single attack event)
+        and no blackout occurs, we expect a minimum score
+        at the end of the episode (when there is no end-of-episode bonus)
+        and a cumulated reward equal to reward_min_no_blackout + the end-of-episode bonus.
+        The score is otherwise 0 at other time steps.
+        """
+ kwargs_opponent = dict(lines_attacked=[ATTACKED_LINE]+['48_53_141'],
+ duration=3,
+ steps_attack=[2])
+ with make(self.env_nm,
+ test=True,
+ difficulty="1",
+ reward_class=_AlertTrustScore(**DEFAULT_PARAMS_TRUSTSCORE),
+ opponent_attack_cooldown=0,
+ opponent_attack_duration=99999,
+ opponent_budget_per_ts=1000,
+ opponent_init_budget=10000.,
+ opponent_action_class=PlayableAction,
+ opponent_class=TestOpponent,
+ kwargs_opponent=kwargs_opponent,
+ _add_to_name="_tatsnb2ast2a"
+ ) as env :
+ env.seed(0)
+ env.reset()
+ step = 0
+ for i in range(env.max_episode_duration()):
+ attackable_line_ids = [0, 1]
+ act = env.action_space()
+ if step == 1 :
+ act = env.action_space({"raise_alert": attackable_line_ids})
+ obs, score, done, info = env.step(act)
+ step += 1
+ if step in _get_steps_attack(kwargs_opponent):
+ assert info["opponent_attack_line"] is not None, f"no attack is detected at step {step}"
+ else:
+ assert info["opponent_attack_line"] is None, f"an attack is detected at step {step}"
+
+ if done:
+
+ total_nb_attacks = env._reward_helper.template_reward.total_nb_attacks
+ nb_last_attacks = env._reward_helper.template_reward.nb_last_attacks
+
+ assert nb_last_attacks == 0
+                    assert total_nb_attacks == 1  # 1 because two simultaneous attacks are considered as a single attack event
+
+ cm_reward=env._reward_helper.template_reward.cumulated_reward
+ assert env._reward_helper.template_reward.cumulated_reward==DEFAULT_PARAMS_TRUSTSCORE["reward_end_episode_bonus"] + \
+ DEFAULT_PARAMS_TRUSTSCORE["reward_min_no_blackout"]
+ cm_reward_min_ep, cm_reward_max_ep = env._reward_helper.template_reward._compute_min_max_reward(
+ total_nb_attacks,nb_last_attacks)
+ assert cm_reward_min_ep == DEFAULT_PARAMS_TRUSTSCORE["reward_min_no_blackout"] * total_nb_attacks
+ assert cm_reward_max_ep == DEFAULT_PARAMS_TRUSTSCORE["reward_end_episode_bonus"] + \
+ DEFAULT_PARAMS_TRUSTSCORE["reward_max_no_blackout"] * total_nb_attacks
+
+                    assert score == DEFAULT_PARAMS_TRUSTSCORE["min_score"]  # because reward_end_episode_bonus == 0
+                    # can be used if reward_end_episode_bonus != 0:
+                    # assert score > DEFAULT_PARAMS_TRUSTSCORE["min_score"]
+ # assert score == manual_score (cm_reward,cm_reward_min_ep,cm_reward_max_ep,env._reward_helper.template_reward.max_score)
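+                    # note: the score assertions here rely on the (assumed) scaling performed by
+                    # _AlertTrustScore, mapping cumulated_reward linearly from
+                    # [cm_reward_min_ep, cm_reward_max_ep] onto [min_score, max_score];
+                    # a cumulated reward equal to cm_reward_min_ep therefore yields min_score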
+ else :
+ assert score == 0
+
+
+ def test_assistant_trust_score_no_blackout_2_attack_diff_time_no_alert(self) -> None :
+        """ When we don't raise any alert for 2 attacks happening at different times (steps 1 and 2)
+        and no blackout occurs, we expect a maximum score at the end of the episode
+        and a cumulated reward equal to 2*reward_max_no_blackout + the end-of-episode bonus.
+        The score is otherwise 0 at other time steps.
+        """
+
+ kwargs_opponent = dict(lines_attacked=[ATTACKED_LINE]+['48_53_141'],
+ duration=[1, 1],
+ steps_attack=[1, 2])
+ with make(self.env_nm,
+ test=True,
+ difficulty="1",
+ opponent_attack_cooldown=0,
+ opponent_attack_duration=99999,
+ opponent_budget_per_ts=1000,
+ opponent_init_budget=10000.,
+ opponent_action_class=PlayableAction,
+ opponent_class=TestOpponentMultiLines,
+ kwargs_opponent=kwargs_opponent,
+ reward_class=_AlertTrustScore(**DEFAULT_PARAMS_TRUSTSCORE),
+ _add_to_name="_tatsnb2dtna"
+ ) as env :
+ env.seed(0)
+ env.reset()
+ step = 0
+ for i in range(env.max_episode_duration()):
+ act = env.action_space()
+ obs, score, done, info = env.step(act)
+ step += 1
+
+ if step in _get_steps_attack(kwargs_opponent, multi=True) :
+ assert info["opponent_attack_line"] is not None, f"no attack is detected at step {step}"
+ else:
+ assert info["opponent_attack_line"] is None, f"an attack is detected at step {step}"
+
+ if done:
+ assert score == env._reward_helper.template_reward.max_score
+ total_nb_attacks = env._reward_helper.template_reward.total_nb_attacks
+ nb_last_attacks = env._reward_helper.template_reward.nb_last_attacks
+
+ assert nb_last_attacks == 0
+ assert total_nb_attacks == 2
+
+ assert env._reward_helper.template_reward.cumulated_reward==DEFAULT_PARAMS_TRUSTSCORE["reward_end_episode_bonus"] +\
+ total_nb_attacks*DEFAULT_PARAMS_TRUSTSCORE["reward_max_no_blackout"]
+ cm_reward_min_ep, cm_reward_max_ep = env._reward_helper.template_reward._compute_min_max_reward(
+ total_nb_attacks,nb_last_attacks)
+ assert cm_reward_min_ep == DEFAULT_PARAMS_TRUSTSCORE["reward_min_no_blackout"] * total_nb_attacks
+ assert cm_reward_max_ep == DEFAULT_PARAMS_TRUSTSCORE["reward_end_episode_bonus"] + \
+ DEFAULT_PARAMS_TRUSTSCORE["reward_max_no_blackout"] * total_nb_attacks
+ else :
+ assert score == 0
+
+ def test_assistant_trust_score_no_blackout_2_attack_diff_time_2_alert(self) -> None :
+        """ When we raise 2 alerts for 2 attacks happening at different times (steps 2 and 3)
+        and no blackout occurs, we expect a minimum score at the end of the episode (when there is no bonus)
+        and a cumulated reward equal to 2*reward_min_no_blackout + the end-of-episode bonus.
+        The score is otherwise 0 at other time steps.
+        """
+
+ kwargs_opponent = dict(lines_attacked=[ATTACKED_LINE]+['48_53_141'],
+ duration=[1,1],
+ steps_attack=[2, 3])
+ with make(self.env_nm,
+ test=True,
+ difficulty="1",
+ opponent_attack_cooldown=0,
+ opponent_attack_duration=99999,
+ opponent_budget_per_ts=1000,
+ opponent_init_budget=10000.,
+ opponent_action_class=PlayableAction,
+ opponent_class=TestOpponentMultiLines,
+ kwargs_opponent=kwargs_opponent,
+ reward_class=_AlertTrustScore(**DEFAULT_PARAMS_TRUSTSCORE),
+ _add_to_name="_tatsnb2dt2a"
+ ) as env :
+ env.seed(0)
+ env.reset()
+ step = 0
+ for i in range(env.max_episode_duration()):
+ act = env.action_space()
+ if step == 1 :
+ act = env.action_space({"raise_alert": [0]})
+ elif step == 2 :
+ act = env.action_space({"raise_alert": [1]})
+ obs, score, done, info = env.step(act)
+ step += 1
+ if step in _get_steps_attack(kwargs_opponent, multi=True):
+ assert info["opponent_attack_line"] is not None, f"no attack is detected at step {step}"
+ else:
+ assert info["opponent_attack_line"] is None, f"an attack is detected at step {step}"
+
+ if done:
+
+ total_nb_attacks = env._reward_helper.template_reward.total_nb_attacks
+ nb_last_attacks = env._reward_helper.template_reward.nb_last_attacks
+
+ assert nb_last_attacks == 0
+ assert total_nb_attacks == 2
+
+ cm_reward=env._reward_helper.template_reward.cumulated_reward
+ assert env._reward_helper.template_reward.cumulated_reward == DEFAULT_PARAMS_TRUSTSCORE[
+ "reward_end_episode_bonus"] + \
+ total_nb_attacks * DEFAULT_PARAMS_TRUSTSCORE["reward_min_no_blackout"]
+ cm_reward_min_ep, cm_reward_max_ep = env._reward_helper.template_reward._compute_min_max_reward(
+ total_nb_attacks,nb_last_attacks)
+ assert cm_reward_min_ep == DEFAULT_PARAMS_TRUSTSCORE["reward_min_no_blackout"] * total_nb_attacks
+ assert cm_reward_max_ep == DEFAULT_PARAMS_TRUSTSCORE["reward_end_episode_bonus"] + \
+ DEFAULT_PARAMS_TRUSTSCORE["reward_max_no_blackout"] * total_nb_attacks
+
+                    assert score == DEFAULT_PARAMS_TRUSTSCORE["min_score"]  # because reward_end_episode_bonus == 0
+                    # can be used if reward_end_episode_bonus != 0:
+                    # assert score > DEFAULT_PARAMS_TRUSTSCORE["min_score"]
+ # assert score == manual_score (cm_reward,cm_reward_min_ep,cm_reward_max_ep,env._reward_helper.template_reward.max_score)
+ else :
+ assert score == 0
+
+ def test_assistant_trust_score_no_blackout_2_attack_diff_time_alert_first_attack(self) -> None :
+        """ When we raise 1 alert on the first of 2 attacks happening at different times (steps 2 and 3)
+        and no blackout occurs, we expect a mean score at the end of the episode (when there is no bonus)
+        and a cumulated reward equal to reward_max_no_blackout + reward_min_no_blackout + the end-of-episode bonus.
+        The score is otherwise 0 at other time steps.
+        """
+
+ kwargs_opponent = dict(lines_attacked=[ATTACKED_LINE]+['48_53_141'],
+ duration=[1,1],
+ steps_attack=[2, 3])
+ with make(self.env_nm,
+ test=True,
+ difficulty="1",
+ opponent_attack_cooldown=0,
+ opponent_attack_duration=99999,
+ opponent_budget_per_ts=1000,
+ opponent_init_budget=10000.,
+ opponent_action_class=PlayableAction,
+ opponent_class=TestOpponentMultiLines,
+ kwargs_opponent=kwargs_opponent,
+ reward_class=_AlertTrustScore(**DEFAULT_PARAMS_TRUSTSCORE),
+ _add_to_name="_tatsnb2dtafa"
+ ) as env :
+ env.seed(0)
+ env.reset()
+ step = 0
+ for i in range(env.max_episode_duration()):
+ act = env.action_space()
+ if step == 1 :
+ act = env.action_space({"raise_alert": [0]})
+ obs, score, done, info = env.step(act)
+ step += 1
+ if step in _get_steps_attack(kwargs_opponent, multi=True):
+ assert info["opponent_attack_line"] is not None, f"no attack is detected at step {step}"
+ else:
+ assert info["opponent_attack_line"] is None, f"an attack is detected at step {step}"
+
+ if done:
+
+ total_nb_attacks = env._reward_helper.template_reward.total_nb_attacks
+ nb_last_attacks = env._reward_helper.template_reward.nb_last_attacks
+
+ assert nb_last_attacks == 0
+ assert total_nb_attacks == 2
+
+ cm_reward=env._reward_helper.template_reward.cumulated_reward
+ assert env._reward_helper.template_reward.cumulated_reward == DEFAULT_PARAMS_TRUSTSCORE[
+ "reward_end_episode_bonus"] + \
+ DEFAULT_PARAMS_TRUSTSCORE["reward_min_no_blackout"]+DEFAULT_PARAMS_TRUSTSCORE["reward_max_no_blackout"]
+
+ cm_reward_min_ep, cm_reward_max_ep = env._reward_helper.template_reward._compute_min_max_reward(
+ total_nb_attacks,nb_last_attacks)
+ assert cm_reward_min_ep == DEFAULT_PARAMS_TRUSTSCORE["reward_min_no_blackout"] * total_nb_attacks
+ assert cm_reward_max_ep == DEFAULT_PARAMS_TRUSTSCORE["reward_end_episode_bonus"] + \
+ DEFAULT_PARAMS_TRUSTSCORE["reward_max_no_blackout"] * total_nb_attacks
+
+ max_score=env._reward_helper.template_reward.max_score
+ mean_score=(max_score + DEFAULT_PARAMS_TRUSTSCORE["min_score"]) / 2
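+                    # with one of the two attacks correctly alerted, the cumulated reward falls exactly
+                    # midway between cm_reward_min_ep and cm_reward_max_ep, hence (under the linear
+                    # scaling assumed above) a score halfway between min_score and max_score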
+
+                    assert score == mean_score  # because reward_end_episode_bonus == 0
+                    # can be used if reward_end_episode_bonus != 0:
+                    # assert score > mean_score
+ # assert score == manual_score (cm_reward,cm_reward_min_ep,cm_reward_max_ep,env._reward_helper.template_reward.max_score)
+
+ else :
+ assert score == 0
+
+
+ def test_assistant_trust_score_no_blackout_2_attack_diff_time_alert_second_attack(self) -> None :
+        """ When we raise 1 alert on the second of 2 attacks happening at different times (steps 2 and 3)
+        and no blackout occurs, we expect a mean score at the end of the episode (when there is no bonus)
+        and a cumulated reward equal to reward_max_no_blackout + reward_min_no_blackout + the end-of-episode bonus.
+        The score is otherwise 0 at other time steps.
+        """
+ kwargs_opponent = dict(lines_attacked=[ATTACKED_LINE]+['48_53_141'],
+ duration=[1,1],
+ steps_attack=[2, 3])
+ with make(self.env_nm,
+ test=True,
+ difficulty="1",
+ opponent_attack_cooldown=0,
+ opponent_attack_duration=99999,
+ opponent_budget_per_ts=1000,
+ opponent_init_budget=10000.,
+ opponent_action_class=PlayableAction,
+ opponent_class=TestOpponentMultiLines,
+ kwargs_opponent=kwargs_opponent,
+ reward_class=_AlertTrustScore(**DEFAULT_PARAMS_TRUSTSCORE),
+ _add_to_name="_tatsnb2dtasa"
+ ) as env :
+ env.seed(0)
+ env.reset()
+ step = 0
+ for i in range(env.max_episode_duration()):
+ act = env.action_space()
+ if i == 2 :
+ act = env.action_space({"raise_alert": [1]})
+ obs, score, done, info = env.step(act)
+ step += 1
+ if step in _get_steps_attack(kwargs_opponent, multi=True):
+ assert info["opponent_attack_line"] is not None, f"no attack is detected at step {step}"
+ else:
+ assert info["opponent_attack_line"] is None, f"an attack is detected at step {step}"
+
+ if done:
+ total_nb_attacks = env._reward_helper.template_reward.total_nb_attacks
+ nb_last_attacks = env._reward_helper.template_reward.nb_last_attacks
+
+ assert nb_last_attacks == 0
+ assert total_nb_attacks == 2
+
+ cm_reward=env._reward_helper.template_reward.cumulated_reward
+ assert env._reward_helper.template_reward.cumulated_reward == DEFAULT_PARAMS_TRUSTSCORE[
+ "reward_end_episode_bonus"] + \
+ DEFAULT_PARAMS_TRUSTSCORE["reward_min_no_blackout"] + DEFAULT_PARAMS_TRUSTSCORE[
+ "reward_max_no_blackout"]
+
+ cm_reward_min_ep, cm_reward_max_ep = env._reward_helper.template_reward._compute_min_max_reward(
+ total_nb_attacks,nb_last_attacks)
+ assert cm_reward_min_ep == DEFAULT_PARAMS_TRUSTSCORE["reward_min_no_blackout"] * total_nb_attacks
+ assert cm_reward_max_ep == DEFAULT_PARAMS_TRUSTSCORE["reward_end_episode_bonus"] + \
+ DEFAULT_PARAMS_TRUSTSCORE["reward_max_no_blackout"] * total_nb_attacks
+
+ max_score=env._reward_helper.template_reward.max_score
+ mean_score=(max_score + DEFAULT_PARAMS_TRUSTSCORE["min_score"]) / 2
+
+                    assert score == mean_score  # because reward_end_episode_bonus == 0
+                    # can be used if reward_end_episode_bonus != 0:
+                    # assert score > mean_score
+ # assert score == manual_score (cm_reward,cm_reward_min_ep,cm_reward_max_ep,env._reward_helper.template_reward.max_score)
+ else :
+ assert score == 0, f"error for step {step}: {score} vs 0"
+
+class TestAlertTrustScoreBlackout_NoAttackCause(unittest.TestCase):
+
+ def setUp(self) -> None:
+        """ WARNING: Parameter ALERT_TIME_WINDOW should be set to 2 in these tests for the environment used.
+        Max Iter should be set to 10"""
+ self.env_nm = os.path.join(
+ PATH_DATA_TEST, "l2rpn_idf_2023_with_alert"
+ )
+
+ def get_dn(self, env):
+ return env.action_space({})
+
+ def get_blackout(self, env):
+ return _get_blackout(env.action_space)
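+    # note: _get_blackout (defined earlier in this file) is assumed to build an action
+    # drastic enough (e.g. disconnecting several elements at once) to trigger a game over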
+
+    # this is the test case where a blackout occurs, but not because of an attack, and you get a maximum score
+    def test_assistant_trust_score_blackout_attack_nocause_blackout_no_alert(self) -> None:
+        """When 1 line is attacked at step 3 and we don't raise an alert
+        and a blackout occurs at step 7 (not considered as caused by the attack because it is outside of the alert time window),
+        we expect a maximum score
+        and a cumulated reward equal to reward_max_no_blackout.
+        The score is otherwise 0 at other time steps.
+        """
+ kwargs_opponent = dict(lines_attacked=[ATTACKED_LINE],
+ duration=3,
+ steps_attack=[3])
+ with make(self.env_nm,
+ test=True,
+ difficulty="1",
+ opponent_attack_cooldown=0,
+ opponent_attack_duration=99999,
+ opponent_budget_per_ts=1000,
+ opponent_init_budget=10000.,
+ opponent_action_class=PlayableAction,
+ opponent_class=TestOpponent,
+ kwargs_opponent=kwargs_opponent,
+ reward_class=_AlertTrustScore(**DEFAULT_PARAMS_TRUSTSCORE),
+ _add_to_name="_tatsbarga"
+ ) as env:
+ new_param = Parameters()
+ new_param.MAX_LINE_STATUS_CHANGED = 10
+
+ env.change_parameters(new_param)
+ env.seed(0)
+ env.reset()
+ step = 0
+ for i in range(env.max_episode_duration()):
+ attackable_line_id = 0
+ act = self.get_dn(env)
+ if i == 7:
+ act = self.get_blackout(env)
+ obs, score, done, info = env.step(act)
+ step += 1
+ if step in _get_steps_attack(kwargs_opponent):
+ assert info["opponent_attack_line"] is not None, f"no attack is detected at step {step}"
+ else:
+ assert info["opponent_attack_line"] is None, f"an attack is detected at step {step}"
+
+ if done:
+ assert score == env._reward_helper.template_reward.max_score
+ total_nb_attacks = env._reward_helper.template_reward.total_nb_attacks
+ nb_last_attacks = env._reward_helper.template_reward.nb_last_attacks
+
+ assert nb_last_attacks == 0 # because no attack caused the blackout
+ assert total_nb_attacks == 1
+
+ assert env._reward_helper.template_reward.cumulated_reward == DEFAULT_PARAMS_TRUSTSCORE[
+ 'reward_max_no_blackout']
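+                    # the blackout happened outside the alert time window of the attack, so the attack
+                    # is scored with the "no blackout" rewards: not alerting it gives reward_max_no_blackout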
+
+ cm_reward_min_ep, cm_reward_max_ep = env._reward_helper.template_reward._compute_min_max_reward(
+ total_nb_attacks, nb_last_attacks)
+ assert cm_reward_min_ep == DEFAULT_PARAMS_TRUSTSCORE['reward_min_no_blackout']
+ assert cm_reward_max_ep == DEFAULT_PARAMS_TRUSTSCORE['reward_max_no_blackout']
+ break
+ else:
+ assert score == 0
+
+    # this is the test case where a blackout occurs, but not because of an attack, and you get a minimum score
+    def test_assistant_trust_score_blackout_attack_nocause_blackout_raise_alert(self) -> None:
+        """When 1 line is attacked at step 3 and we raise an alert
+        and a blackout occurs at step 7 (not considered as caused by the attack because it is outside of the alert time window),
+        we expect a minimum score
+        and a cumulated reward equal to reward_min_no_blackout.
+        The score is otherwise 0 at other time steps.
+        """
+ kwargs_opponent = dict(lines_attacked=[ATTACKED_LINE],
+ duration=3,
+ steps_attack=[3])
+ with make(self.env_nm,
+ test=True,
+ difficulty="1",
+ opponent_attack_cooldown=0,
+ opponent_attack_duration=99999,
+ opponent_budget_per_ts=1000,
+ opponent_init_budget=10000.,
+ opponent_action_class=PlayableAction,
+ opponent_class=TestOpponent,
+ kwargs_opponent=kwargs_opponent,
+ reward_class=_AlertTrustScore(**DEFAULT_PARAMS_TRUSTSCORE),
+ _add_to_name="_tatsbarga"
+ ) as env:
+ new_param = Parameters()
+ new_param.MAX_LINE_STATUS_CHANGED = 10
+
+ env.change_parameters(new_param)
+ env.seed(0)
+ env.reset()
+ step = 0
+ for i in range(env.max_episode_duration()):
+ attackable_line_id = 0
+ act = self.get_dn(env)
+ if i == 7:
+ act = self.get_blackout(env)
+ elif i == 2:
+ # I raise the alert (on the right line) just before the opponent attack
+ # opp attack at step = 3, so i = 2
+ act = env.action_space({"raise_alert": [attackable_line_id]})
+ obs, score, done, info = env.step(act)
+ step += 1
+ if step in _get_steps_attack(kwargs_opponent):
+ assert info["opponent_attack_line"] is not None, f"no attack is detected at step {step}"
+ else:
+ assert info["opponent_attack_line"] is None, f"an attack is detected at step {step}"
+
+ if done:
+ assert score == DEFAULT_PARAMS_TRUSTSCORE["min_score"]
+ total_nb_attacks = env._reward_helper.template_reward.total_nb_attacks
+ nb_last_attacks = env._reward_helper.template_reward.nb_last_attacks
+
+ assert nb_last_attacks == 0 # because no attack caused the blackout
+ assert total_nb_attacks == 1
+
+ assert env._reward_helper.template_reward.cumulated_reward == DEFAULT_PARAMS_TRUSTSCORE[
+ 'reward_min_no_blackout']
+
+ cm_reward_min_ep, cm_reward_max_ep = env._reward_helper.template_reward._compute_min_max_reward(
+ total_nb_attacks, nb_last_attacks)
+ assert cm_reward_min_ep == DEFAULT_PARAMS_TRUSTSCORE['reward_min_no_blackout']
+ assert cm_reward_max_ep == DEFAULT_PARAMS_TRUSTSCORE['reward_max_no_blackout']
+ break
+ else:
+ assert score == 0
+
+
+
+    # this is the test case where a blackout occurs, but not because of an attack, and you get a score of 0 (in the middle)
+    def test_assistant_trust_score_blackout_no_attack_alert(self) -> None:
+        """Even if there is a blackout and we raise an alert,
+        we expect a score of 0 because there is no attack and we don't finish the scenario"""
+ with make(
+ self.env_nm,
+ test=True,
+ difficulty="1",
+ reward_class=_AlertTrustScore(**DEFAULT_PARAMS_TRUSTSCORE)
+ ) as env:
+ env.seed(0)
+ env.reset()
+
+ done = False
+ for i in range(env.max_episode_duration()):
+ act = self.get_dn(env)
+ if i == 3:
+ act = self.get_blackout(env)
+ elif i == 1:
+ act = env.action_space({"raise_alert": [0]})
+ obs, score, done, info = env.step(act)
+ if info["opponent_attack_line"] is None:
+                    if done:
+ assert score == 0.
+ total_nb_attacks = env._reward_helper.template_reward.total_nb_attacks
+ nb_last_attacks = env._reward_helper.template_reward.nb_last_attacks
+
+ assert nb_last_attacks == 0
+ assert total_nb_attacks == 0
+
+ assert env._reward_helper.template_reward.total_nb_attacks == 0.
+ assert env._reward_helper.template_reward.cumulated_reward == 0.
+
+ cm_reward_min_ep, cm_reward_max_ep = env._reward_helper.template_reward._compute_min_max_reward(
+ total_nb_attacks, nb_last_attacks)
+
+ assert cm_reward_min_ep == 0.0
+ assert cm_reward_max_ep == 0.0
+ else:
+ raise Grid2OpException('No attack expected')
+
+ if done:
+ break
+
+ assert done
+
+    # expected score: 0
+    def test_assistant_trust_score_blackout_no_attack_no_alert(self) -> None:
+        """Even if there is a blackout and we don't raise an alert,
+        we expect a score of 0 because there is no attack and we don't finish the scenario"""
+ with make(
+ self.env_nm,
+ test=True,
+ difficulty="1",
+ reward_class=_AlertTrustScore(**DEFAULT_PARAMS_TRUSTSCORE)
+ ) as env:
+ env.seed(0)
+ env.reset()
+
+ done = False
+ for i in range(env.max_episode_duration()):
+ act = self.get_dn(env)
+ if i == 3:
+ act = self.get_blackout(env)
+ obs, score, done, info = env.step(act)
+ if info["opponent_attack_line"] is None:
+ if done:
+ assert score == 0.
+ total_nb_attacks = env._reward_helper.template_reward.total_nb_attacks
+ nb_last_attacks = env._reward_helper.template_reward.nb_last_attacks
+
+ assert nb_last_attacks == 0
+ assert total_nb_attacks == 0
+
+ assert env._reward_helper.template_reward.total_nb_attacks == 0.
+ assert env._reward_helper.template_reward.cumulated_reward == 0.
+
+ cm_reward_min_ep, cm_reward_max_ep = env._reward_helper.template_reward._compute_min_max_reward(
+ total_nb_attacks, nb_last_attacks)
+
+ assert cm_reward_min_ep == 0.0
+ assert cm_reward_max_ep == 0.0
+ else:
+ raise Grid2OpException('No attack expected')
+
+ if done:
+ break
+
+ assert done
+
+    # expected score: 0
+    def test_assistant_trust_score_blackout_no_attack_before_window_alert(self) -> None:
+        """Even if there is a blackout and we raise an alert too early,
+        we expect a score of 0 because there is no attack and we don't finish the scenario"""
+ with make(
+ self.env_nm,
+ test=True,
+ difficulty="1",
+ reward_class=_AlertTrustScore(**DEFAULT_PARAMS_TRUSTSCORE)
+ ) as env:
+ env.seed(0)
+ env.reset()
+
+ done = False
+ for i in range(env.max_episode_duration()):
+ act = self.get_dn(env)
+ if i == 3:
+ act = self.get_blackout(env)
+ elif i in [0, 1, 2]:
+ act = env.action_space({"raise_alert": [0]})
+ obs, score, done, info = env.step(act)
+ if info["opponent_attack_line"] is None:
+ assert score == 0.
+ if done:
+ total_nb_attacks = env._reward_helper.template_reward.total_nb_attacks
+ nb_last_attacks = env._reward_helper.template_reward.nb_last_attacks
+
+ assert nb_last_attacks == 0
+ assert total_nb_attacks == 0
+
+ assert env._reward_helper.template_reward.total_nb_attacks == 0.
+ assert env._reward_helper.template_reward.cumulated_reward == 0.
+
+ cm_reward_min_ep, cm_reward_max_ep = env._reward_helper.template_reward._compute_min_max_reward(
+ total_nb_attacks, nb_last_attacks)
+
+ assert cm_reward_min_ep == 0.0
+ assert cm_reward_max_ep == 0.0
+ else:
+ raise Grid2OpException('No attack expected')
+
+ if done:
+ break
+
+ assert done
+
+    # expected score: 0
+    def test_assistant_trust_score_blackout_no_attack_before_window_no_alert(self) -> None:
+        """Even if there is a blackout and we would raise an alert too late (the alert step is never reached),
+        we expect a score of 0 because there is no attack and we don't finish the scenario"""
+ with make(
+ self.env_nm,
+ test=True,
+ difficulty="1",
+ reward_class=_AlertTrustScore(**DEFAULT_PARAMS_TRUSTSCORE)
+ ) as env:
+ env.seed(0)
+ env.reset()
+
+ done = False
+ for i in range(env.max_episode_duration()):
+ act = self.get_dn(env)
+ if i == 3:
+ act = self.get_blackout(env)
+ elif i == 4:
+ # we never go here ...
+ act = env.action_space({"raise_alert": [0]})
+ obs, score, done, info = env.step(act)
+
+ if info["opponent_attack_line"] is None:
+ assert score == 0.
+ if done:
+ total_nb_attacks = env._reward_helper.template_reward.total_nb_attacks
+ nb_last_attacks = env._reward_helper.template_reward.nb_last_attacks
+
+ assert nb_last_attacks == 0
+ assert total_nb_attacks == 0
+
+ assert env._reward_helper.template_reward.total_nb_attacks == 0.
+ assert env._reward_helper.template_reward.cumulated_reward == 0.
+
+ cm_reward_min_ep, cm_reward_max_ep = env._reward_helper.template_reward._compute_min_max_reward(
+ total_nb_attacks, nb_last_attacks)
+
+ assert cm_reward_min_ep == 0.0
+ assert cm_reward_max_ep == 0.0
+ else:
+ raise Grid2OpException('No attack expected')
+
+ if done:
+ break
+
+ assert done
+
+
+class TestAlertTrustScoreBlackout_CauseAttack(unittest.TestCase):
+    """Test the basic behavior of the assistant alert feature when a blackout occurs"""
+
+ def setUp(self) -> None:
+        """ WARNING: Parameter ALERT_TIME_WINDOW should be set to 2 in these tests for the environment used.
+        Max Iter should be set to 10"""
+ self.env_nm = os.path.join(
+ PATH_DATA_TEST, "l2rpn_idf_2023_with_alert"
+ )
+
+
+ def get_dn(self, env):
+ return env.action_space({})
+
+ def get_blackout(self, env):
+ return _get_blackout(env.action_space)
+
+    # this is the test case of a blackout with attack where it reaches the maximum score
+    def test_assistant_trust_score_blackout_attack_raise_good_alert(self) -> None :
+        """When 1 line is attacked at step 3 and we raise a good alert
+        and a blackout occurs at step 4, we expect a maximum score
+        and a cumulated reward equal to reward_max_blackout.
+        The score is otherwise 0 at other time steps.
+        """
+ kwargs_opponent = dict(lines_attacked=[ATTACKED_LINE],
+ duration=3,
+ steps_attack=[3])
+ with make(self.env_nm,
+ test=True,
+ difficulty="1",
+ opponent_attack_cooldown=0,
+ opponent_attack_duration=99999,
+ opponent_budget_per_ts=1000,
+ opponent_init_budget=10000.,
+ opponent_action_class=PlayableAction,
+ opponent_class=TestOpponent,
+ kwargs_opponent=kwargs_opponent,
+ reward_class=_AlertTrustScore(**DEFAULT_PARAMS_TRUSTSCORE),
+ _add_to_name="_tatsbarga"
+ ) as env :
+ new_param = Parameters()
+ new_param.MAX_LINE_STATUS_CHANGED = 10
+
+ env.change_parameters(new_param)
+ env.seed(0)
+ env.reset()
+ step = 0
+ for i in range(env.max_episode_duration()):
+ attackable_line_id = 0
+ act = self.get_dn(env)
+ if i == 3 :
+ act = self.get_blackout(env)
+ elif i == 2:
+ # I raise the alert (on the right line) just before the opponent attack
+ # opp attack at step = 3, so i = 2
+ act = env.action_space({"raise_alert": [attackable_line_id]})
+ obs, score, done, info = env.step(act)
+ step += 1
+ if step in _get_steps_attack(kwargs_opponent):
+ assert info["opponent_attack_line"] is not None, f"no attack is detected at step {step}"
+ else:
+ assert info["opponent_attack_line"] is None, f"an attack is detected at step {step}"
+
+ if done:
+ assert score == env._reward_helper.template_reward.max_score
+ total_nb_attacks = env._reward_helper.template_reward.total_nb_attacks
+ nb_last_attacks = env._reward_helper.template_reward.nb_last_attacks
+
+ assert nb_last_attacks == 1 # because blackout caused by attack
+ assert total_nb_attacks == 1
+
+ assert env._reward_helper.template_reward.cumulated_reward==DEFAULT_PARAMS_TRUSTSCORE['reward_max_blackout']
+
+ cm_reward_min_ep, cm_reward_max_ep = env._reward_helper.template_reward._compute_min_max_reward(
+ total_nb_attacks,nb_last_attacks)
+ assert cm_reward_min_ep == DEFAULT_PARAMS_TRUSTSCORE['reward_min_blackout']
+ assert cm_reward_max_ep == DEFAULT_PARAMS_TRUSTSCORE['reward_max_blackout']
+ break
+ else :
+ assert score == 0
+
+    # this is the test case of a blackout with attack where it reaches the minimum score
+    def test_assistant_trust_score_blackout_attack_raise_alert_just_before_blackout(self) -> None :
+        """
+        When 1 line is attacked at step 3 and we raise 1 alert, but too early for the attack,
+        and a blackout occurs at step 4, we expect a minimum score
+        and a cumulated reward equal to reward_min_blackout.
+        The score is otherwise 0 at other time steps.
+        """
+ kwargs_opponent = dict(lines_attacked=[ATTACKED_LINE],
+ duration=3,
+ steps_attack=[3])
+ with make(self.env_nm,
+ test=True,
+ difficulty="1",
+ opponent_attack_cooldown=0,
+ opponent_attack_duration=99999,
+ opponent_budget_per_ts=1000,
+ opponent_init_budget=10000.,
+ opponent_action_class=PlayableAction,
+ opponent_class=TestOpponent,
+ kwargs_opponent=kwargs_opponent,
+ reward_class=_AlertTrustScore(**DEFAULT_PARAMS_TRUSTSCORE),
+ _add_to_name="_tatsbarajbb"
+ ) as env :
+ new_param = Parameters()
+ new_param.MAX_LINE_STATUS_CHANGED = 10
+
+ env.change_parameters(new_param)
+ env.seed(0)
+ env.reset()
+ step = 0
+ for i in range(env.max_episode_duration()):
+ attackable_line_id = 0
+ act = self.get_dn(env)
+ if i == 3 :
+ act = self.get_blackout(env)
+ elif i == 1:
+                    # the opponent attacks at step 3 (i.e. when i = 2);
+                    # here the alert is raised one step before that (when i = 1), hence too early
+ act = env.action_space({"raise_alert": [attackable_line_id]})
+ obs, score, done, info = env.step(act)
+ step += 1
+ if step in _get_steps_attack(kwargs_opponent):
+ assert info["opponent_attack_line"] is not None, f"no attack is detected at step {step}"
+ else:
+ assert info["opponent_attack_line"] is None, f"an attack is detected at step {step}"
+
+ if done:
+ assert score == DEFAULT_PARAMS_TRUSTSCORE["min_score"]
+ total_nb_attacks = env._reward_helper.template_reward.total_nb_attacks
+ nb_last_attacks = env._reward_helper.template_reward.nb_last_attacks
+
+ assert nb_last_attacks == 1 # because blackout caused by attack
+ assert total_nb_attacks == 1
+
+ assert env._reward_helper.template_reward.cumulated_reward==DEFAULT_PARAMS_TRUSTSCORE['reward_min_blackout']
+
+ cm_reward_min_ep, cm_reward_max_ep = env._reward_helper.template_reward._compute_min_max_reward(
+ total_nb_attacks,nb_last_attacks)
+ assert cm_reward_min_ep == DEFAULT_PARAMS_TRUSTSCORE['reward_min_blackout']
+ assert cm_reward_max_ep == DEFAULT_PARAMS_TRUSTSCORE['reward_max_blackout']
+ break
+ else :
+ assert score == 0
+
+    # this is the test case of a blackout with attack where it reaches a mean score (in the middle)
+    def test_assistant_trust_score_blackout_2_lines_attacked_simulaneous_only_1_alert(self) -> None:
+        """
+        When 2 lines are attacked simultaneously at step 3 (considered as a single attack event) and we raise only 1 alert
+        and a blackout occurs at step 4, we expect a mean score
+        and a cumulated reward equal to (reward_max_blackout + reward_min_blackout)/2.
+        The score is otherwise 0 at other time steps.
+        """
+ kwargs_opponent = dict(lines_attacked=[ATTACKED_LINE] + ['48_53_141'],
+ duration=3,
+ steps_attack=[3, 3])
+ with make(self.env_nm,
+ test=True,
+ difficulty="1",
+ reward_class=_AlertTrustScore(**DEFAULT_PARAMS_TRUSTSCORE),
+ opponent_attack_cooldown=0,
+ opponent_attack_duration=99999,
+ opponent_budget_per_ts=1000,
+ opponent_init_budget=10000.,
+ opponent_action_class=PlayableAction,
+ opponent_class=TestOpponent,
+ kwargs_opponent=kwargs_opponent,
+ _add_to_name="_tatsb2laso1a"
+ ) as env:
+ new_param = Parameters()
+ new_param.MAX_LINE_STATUS_CHANGED = 10
+
+ env.change_parameters(new_param)
+ env.seed(0)
+ env.reset()
+ step = 0
+ for i in range(env.max_episode_duration()):
+ attackable_line_id = 0
+ act = self.get_dn(env)
+ if i == 3:
+ act = self.get_blackout(env)
+ elif i == 2:
+ # attack at step 3, so i = 2, which is the
+ # right time to send an alert
+ act = env.action_space({"raise_alert": [0]})
+ obs, score, done, info = env.step(act)
+ step += 1
+
+ if step in _get_steps_attack(kwargs_opponent):
+ assert info["opponent_attack_line"] is not None, f"no attack is detected at step {step}"
+ else:
+ assert info["opponent_attack_line"] is None, f"an attack is detected at step {step}"
+
+ if done:
+ total_nb_attacks = env._reward_helper.template_reward.total_nb_attacks
+ nb_last_attacks = env._reward_helper.template_reward.nb_last_attacks
+
+ assert nb_last_attacks == 1 # because blackout caused by attack
+ assert total_nb_attacks == 1 # 1 because two simultaneaous attacks is considered as a signgle attack event
+
+ cm_reward = env._reward_helper.template_reward.cumulated_reward
+                    assert cm_reward == (DEFAULT_PARAMS_TRUSTSCORE['reward_max_blackout']
+                                         + DEFAULT_PARAMS_TRUSTSCORE['reward_min_blackout']) / 2
+                    # divided by 2 because there are two attacks at the same time,
+                    # so we take the mean of the individual alert rewards
+
+ cm_reward_min_ep, cm_reward_max_ep = env._reward_helper.template_reward._compute_min_max_reward(
+ total_nb_attacks, nb_last_attacks)
+                    # careful: attacks within the same time window before the blackout only count as a single weighted attack...
+ assert cm_reward_min_ep == DEFAULT_PARAMS_TRUSTSCORE['reward_min_blackout']
+ assert cm_reward_max_ep == DEFAULT_PARAMS_TRUSTSCORE['reward_max_blackout']
+
+ max_score = env._reward_helper.template_reward.max_score
+ mean_score = (DEFAULT_PARAMS_TRUSTSCORE['min_score'] + max_score) / 2
+ assert score == mean_score
+ break
+ else:
+ assert score == 0
+
+ def test_assistant_trust_score_blackout_attack_no_alert(self) -> None:
+ """
+        When 1 line is attacked at step 3 and we don't raise any alert
+        and a blackout occurs at step 4, we expect a minimum score
+        and a cumulated reward equal to reward_min_blackout.
+        The score is otherwise 0 at other time steps.
+ """
+ kwargs_opponent = dict(lines_attacked=[ATTACKED_LINE],
+ duration=3,
+ steps_attack=[3])
+ with make(self.env_nm,
+ test=True,
+ difficulty="1",
+ opponent_attack_cooldown=0,
+ opponent_attack_duration=99999,
+ opponent_budget_per_ts=1000,
+ opponent_init_budget=10000.,
+ opponent_action_class=PlayableAction,
+ opponent_class=TestOpponent,
+ kwargs_opponent=kwargs_opponent,
+ reward_class=_AlertTrustScore(**DEFAULT_PARAMS_TRUSTSCORE),
+ _add_to_name="_tatsbana"
+ ) as env:
+ new_param = Parameters()
+ new_param.MAX_LINE_STATUS_CHANGED = 10
+
+ env.change_parameters(new_param)
+ env.seed(0)
+ env.reset()
+ step = 0
+ for i in range(env.max_episode_duration()):
+ attackable_line_id = 0
+ act = self.get_dn(env)
+ if step == 3:
+ act = self.get_blackout(env)
+ obs, score, done, info = env.step(act)
+ step += 1
+
+ if step in _get_steps_attack(kwargs_opponent):
+ assert info["opponent_attack_line"] is not None, f"no attack is detected at step {step}"
+ else:
+ assert info["opponent_attack_line"] is None, f"an attack is detected at step {step}"
+
+ if done:
+ assert score == DEFAULT_PARAMS_TRUSTSCORE["min_score"]
+ total_nb_attacks = env._reward_helper.template_reward.total_nb_attacks
+ nb_last_attacks = env._reward_helper.template_reward.nb_last_attacks
+
+ assert nb_last_attacks == 1 # because blackout caused by attack
+ assert total_nb_attacks == 1
+ assert env._reward_helper.template_reward.cumulated_reward == DEFAULT_PARAMS_TRUSTSCORE[
+ 'reward_min_blackout'] # -10
+ cm_reward_min_ep, cm_reward_max_ep = env._reward_helper.template_reward._compute_min_max_reward(
+ total_nb_attacks, nb_last_attacks)
+ assert cm_reward_min_ep == DEFAULT_PARAMS_TRUSTSCORE['reward_min_blackout']
+ assert cm_reward_max_ep == DEFAULT_PARAMS_TRUSTSCORE['reward_max_blackout']
+ break
+ else:
+ assert score == 0
+
+ def test_assistant_trust_score_blackout_attack_raise_alert_too_early(self) -> None :
+ """
+        When 1 line is attacked at step 3 and we raise 1 alert too early
+        and a blackout occurs at step 4, we expect a minimum score
+        and a cumulated reward equal to reward_min_blackout.
+        The score is otherwise 0 at other time steps.
+ """
+        # expected cumulated reward: reward_min_blackout
+ kwargs_opponent = dict(lines_attacked=[ATTACKED_LINE],
+ duration=3,
+ steps_attack=[3])
+ with make(self.env_nm,
+ test=True,
+ difficulty="1",
+ opponent_attack_cooldown=0,
+ opponent_attack_duration=99999,
+ opponent_budget_per_ts=1000,
+ opponent_init_budget=10000.,
+ opponent_action_class=PlayableAction,
+ opponent_class=TestOpponent,
+ kwargs_opponent=kwargs_opponent,
+ reward_class=_AlertTrustScore(**DEFAULT_PARAMS_TRUSTSCORE),
+ _add_to_name="_tatsbarate"
+ ) as env :
+ new_param = Parameters()
+ new_param.MAX_LINE_STATUS_CHANGED = 10
+
+ env.change_parameters(new_param)
+ env.seed(0)
+ env.reset()
+ step = 0
+ for i in range(env.max_episode_duration()):
+ attackable_line_id = 0
+ act = self.get_dn(env)
+ if i == 3 :
+ act = self.get_blackout(env)
+ elif i == 1:
+                    # the opponent attacks at step 3 (i.e. when i = 2); here the alert is raised at i = 1, one step too early
+ act = env.action_space({"raise_alert": [attackable_line_id]})
+ obs, score, done, info = env.step(act)
+ step += 1
+ if step in _get_steps_attack(kwargs_opponent):
+ assert info["opponent_attack_line"] is not None, f"no attack is detected at step {step}"
+ else:
+ assert info["opponent_attack_line"] is None, f"an attack is detected at step {step}"
+
+ if done:
+ assert score == DEFAULT_PARAMS_TRUSTSCORE["min_score"]
+ total_nb_attacks = env._reward_helper.template_reward.total_nb_attacks
+ nb_last_attacks = env._reward_helper.template_reward.nb_last_attacks
+
+ assert nb_last_attacks == 1 # because blackout caused by attack
+ assert total_nb_attacks == 1
+
+ assert env._reward_helper.template_reward.cumulated_reward == DEFAULT_PARAMS_TRUSTSCORE[
+ 'reward_min_blackout']
+
+ cm_reward_min_ep, cm_reward_max_ep = env._reward_helper.template_reward._compute_min_max_reward(
+ total_nb_attacks,nb_last_attacks)
+ assert cm_reward_min_ep == DEFAULT_PARAMS_TRUSTSCORE['reward_min_blackout']
+ assert cm_reward_max_ep == DEFAULT_PARAMS_TRUSTSCORE['reward_max_blackout']
+ break
+ else :
+ assert score == 0
+
+    # expected cumulated reward: reward_max_blackout
+    def test_assistant_trust_score_blackout_2_lines_same_step_in_window_good_alerts(self) -> None :
+        """
+        When 2 lines are attacked simultaneously at step 3 and we raise 2 alerts
+        and a blackout occurs at step 4, we expect a maximum score
+        and a cumulated reward equal to reward_max_blackout.
+        The score is otherwise 0 at other time steps.
+        """
+ kwargs_opponent = dict(lines_attacked=[ATTACKED_LINE]+['48_53_141'],
+ duration=3,
+ steps_attack=[3, 3])
+ with make(self.env_nm,
+ test=True,
+ difficulty="1",
+ opponent_attack_cooldown=0,
+ opponent_attack_duration=99999,
+ opponent_budget_per_ts=1000,
+ opponent_init_budget=10000.,
+ opponent_action_class=PlayableAction,
+ opponent_class=TestOpponent,
+ kwargs_opponent=kwargs_opponent,
+ reward_class=_AlertTrustScore(**DEFAULT_PARAMS_TRUSTSCORE),
+ _add_to_name="_tatsb2lssiwga"
+ ) as env :
+ new_param = Parameters()
+ new_param.MAX_LINE_STATUS_CHANGED = 10
+
+ env.change_parameters(new_param)
+ env.seed(0)
+ env.reset()
+ step = 0
+ for i in range(env.max_episode_duration()):
+ attackable_line_id = 0
+ act = self.get_dn(env)
+ if i == 3 :
+ act = self.get_blackout(env)
+ elif i == 2:
+ # attack at step 3, so when i = 2 (which is the right time to send an alert)
+ act = env.action_space({"raise_alert": [0,1]})
+ obs, score, done, info = env.step(act)
+ step += 1
+
+ if step in _get_steps_attack(kwargs_opponent):
+ assert info["opponent_attack_line"] is not None, f"no attack is detected at step {step}"
+ else:
+ assert info["opponent_attack_line"] is None, f"an attack is detected at step {step}"
+
+ if done:
+ assert score == env._reward_helper.template_reward.max_score
+ total_nb_attacks = env._reward_helper.template_reward.total_nb_attacks
+ nb_last_attacks = env._reward_helper.template_reward.nb_last_attacks
+
+ assert nb_last_attacks == 1 # because blackout caused by attack
+                    assert total_nb_attacks == 1  # 1 because two simultaneous attacks are considered as a single attack event
+
+                    assert env._reward_helper.template_reward.cumulated_reward == (
+                        DEFAULT_PARAMS_TRUSTSCORE['reward_max_blackout']
+                        + DEFAULT_PARAMS_TRUSTSCORE['reward_max_blackout']) / 2
+                    # divided by 2 because there are two attacks at the same time,
+                    # so we take the mean of the individual alert rewards
+
+ cm_reward_min_ep, cm_reward_max_ep = env._reward_helper.template_reward._compute_min_max_reward(
+ total_nb_attacks,nb_last_attacks)
+                    # careful: attacks within the same time window before the blackout only count as a single weighted attack...
+ assert cm_reward_min_ep == DEFAULT_PARAMS_TRUSTSCORE['reward_min_blackout']
+ assert cm_reward_max_ep == DEFAULT_PARAMS_TRUSTSCORE['reward_max_blackout']
+
+ break
+ else :
+ assert score == 0
+
+ def test_assistant_trust_score_blackout_2_lines_different_step_in_window_good_alerts(self) -> None :
+ """
+        When 2 lines are attacked at different steps 3 and 4 and we raise 2 alerts
+        and a blackout occurs at step 4, we expect a maximum score
+        and a cumulated reward equal to (2*reward_max_blackout)/2.
+        The score is otherwise 0 at other time steps.
+ """
+ kwargs_opponent = dict(lines_attacked=[ATTACKED_LINE]+['48_53_141'],
+ duration=[1,1],
+ steps_attack=[3, 4])
+ with make(self.env_nm,
+ test=True,
+ difficulty="1",
+ opponent_attack_cooldown=0,
+ opponent_attack_duration=99999,
+ opponent_budget_per_ts=1000,
+ opponent_init_budget=10000.,
+ opponent_action_class=PlayableAction,
+ opponent_class=TestOpponentMultiLines,
+ kwargs_opponent=kwargs_opponent,
+ reward_class=_AlertTrustScore(**DEFAULT_PARAMS_TRUSTSCORE),
+ _add_to_name="_tatsb2ldsiwga"
+ ) as env :
+ env.seed(0)
+ obs = env.reset()
+ step = 0
+ for i in range(env.max_episode_duration()):
+ act = self.get_dn(env)
+ if i == 2 :
+ # opp attack "line 0" at step 3 so i = 2 => good alert
+ act = env.action_space({"raise_alert": [0]})
+ elif i == 3 :
+ # opp attack "line 1" at step 4 so i = 3 => good alert
+ act = env.action_space({"raise_alert": [1]})
+ elif i == 4 :
+ # trigger blackout
+ act = self.get_blackout(env)
+ obs, score, done, info = env.step(act)
+ step += 1
+
+ if step in _get_steps_attack(kwargs_opponent, multi=True):
+ assert info["opponent_attack_line"] is not None, f"no attack is detected at step {step}"
+ else:
+ assert info["opponent_attack_line"] is None, f"an attack is detected at step {step}"
+
+ if done :
+ assert score == env._reward_helper.template_reward.max_score
+ total_nb_attacks = env._reward_helper.template_reward.total_nb_attacks
+ nb_last_attacks = env._reward_helper.template_reward.nb_last_attacks
+
+ assert nb_last_attacks == 2 # because blackout caused by attacks
+ assert total_nb_attacks == 2
+
+ cm_reward = env._reward_helper.template_reward.cumulated_reward
+ assert cm_reward == (DEFAULT_PARAMS_TRUSTSCORE['reward_max_blackout'] +DEFAULT_PARAMS_TRUSTSCORE[
+ 'reward_max_blackout']) / total_nb_attacks
+
+ cm_reward_min_ep, cm_reward_max_ep = env._reward_helper.template_reward._compute_min_max_reward(
+ total_nb_attacks,nb_last_attacks)
+                    # careful: attacks within the same time window before the blackout only count as a single weighted attack...
+ assert cm_reward_min_ep == DEFAULT_PARAMS_TRUSTSCORE[
+ 'reward_min_blackout']
+ assert cm_reward_max_ep == DEFAULT_PARAMS_TRUSTSCORE[
+ 'reward_max_blackout']
+
+ max_score=env._reward_helper.template_reward.max_score
+ assert score == max_score
+ break
+ else :
+ assert score == 0, f"error for step {step}: {score} vs 0"
+
+ def test_assistant_trust_score_blackout_2_lines_attacked_different_step_in_window_only_1_alert_on_first_attacked_line(self) -> None:
+ """
+        When 2 lines are attacked at different steps 3 and 4 and we raise 1 alert on the first attack
+        and a blackout occurs at step 4, we expect a mean score
+        and a cumulated reward equal to (reward_max_blackout + reward_min_blackout)/2.
+        The score is otherwise 0 at other time steps.
+ """
+ kwargs_opponent = dict(lines_attacked=[ATTACKED_LINE]+['48_53_141'],
+ duration=[1,1],
+ steps_attack=[3, 4])
+ with make(self.env_nm,
+ test=True,
+ difficulty="1",
+ opponent_attack_cooldown=0,
+ opponent_attack_duration=99999,
+ opponent_budget_per_ts=1000,
+ opponent_init_budget=10000.,
+ opponent_action_class=PlayableAction,
+ opponent_class=TestOpponentMultiLines,
+ kwargs_opponent=kwargs_opponent,
+ reward_class=_AlertTrustScore(**DEFAULT_PARAMS_TRUSTSCORE),
+ _add_to_name="_tatsb2ladsiwo1aofal"
+ ) as env :
+ env.seed(0)
+ env.reset()
+ step = 0
+ for i in range(env.max_episode_duration()):
+ act = self.get_dn(env)
+ if i == 2 :
+ # opp attack "line 0" at step 3 so i = 2 => good alert
+ act = env.action_space({"raise_alert": [0]})
+ elif i == 3 :
+ act = self.get_blackout(env)
+ obs, score, done, info = env.step(act)
+ step += 1 # i = step - 1 at this stage
+ if step in _get_steps_attack(kwargs_opponent, multi=True):
+ assert info["opponent_attack_line"] is not None, f"no attack is detected at step {step}"
+ else:
+ assert info["opponent_attack_line"] is None, f"an attack is detected at step {step}"
+
+ if done :
+ total_nb_attacks = env._reward_helper.template_reward.total_nb_attacks
+ nb_last_attacks = env._reward_helper.template_reward.nb_last_attacks
+
+ assert nb_last_attacks == 2 # because blackout caused by attacks
+ assert total_nb_attacks == 2
+
+ cm_reward = env._reward_helper.template_reward.cumulated_reward
+ assert cm_reward == (DEFAULT_PARAMS_TRUSTSCORE[
+ 'reward_max_blackout']+DEFAULT_PARAMS_TRUSTSCORE[
+ 'reward_min_blackout'])/total_nb_attacks
+
+ cm_reward_min_ep, cm_reward_max_ep = env._reward_helper.template_reward._compute_min_max_reward(
+ total_nb_attacks,nb_last_attacks)
+                    # careful: attacks within the same time window before the blackout only count as a single weighted attack...
+ assert cm_reward_min_ep == DEFAULT_PARAMS_TRUSTSCORE['reward_min_blackout']
+ assert cm_reward_max_ep == DEFAULT_PARAMS_TRUSTSCORE['reward_max_blackout']
+
+ mean_score=(DEFAULT_PARAMS_TRUSTSCORE['min_score']+env._reward_helper.template_reward.max_score)/2
+ assert score == mean_score
+
+ break
+ else :
+ assert score == 0, f"error for step {step}: {score} vs 0"
+
+ def test_assistant_trust_score_blackout_2_lines_attacked_different_step_in_window_only_1_alert_on_second_attacked_line(self) -> None:
+ """
+        When 2 lines are attacked at different steps 3 and 4 and we raise 1 alert on the second attack
+        and a blackout occurs at step 5, we expect a mean score
+        and a cumulated reward equal to (reward_max_blackout + reward_min_blackout)/2.
+        The score is otherwise 0 at other time steps.
+ """
+ kwargs_opponent = dict(lines_attacked=[ATTACKED_LINE]+['48_53_141'],
+ duration=[1,1],
+ steps_attack=[3, 4])
+ with make(self.env_nm,
+ test=True,
+ difficulty="1",
+ opponent_attack_cooldown=0,
+ opponent_attack_duration=99999,
+ opponent_budget_per_ts=1000,
+ opponent_init_budget=10000.,
+ opponent_action_class=PlayableAction,
+ opponent_class=TestOpponentMultiLines,
+ kwargs_opponent=kwargs_opponent,
+ reward_class=_AlertTrustScore(**DEFAULT_PARAMS_TRUSTSCORE),
+ _add_to_name="_tatsb2ladsiwo1aosal"
+ ) as env :
+ env.seed(0)
+ env.reset()
+ step = 0
+ for i in range(env.max_episode_duration()):
+ act = self.get_dn(env)
+ if i == 3 :
+ # opp attack "line 1" at step 4 so i = 3 => good alert
+ act = env.action_space({"raise_alert": [1]})
+ elif i == 4 :
+ act = self.get_blackout(env)
+ obs, score, done, info = env.step(act)
+ step += 1
+ if step in _get_steps_attack(kwargs_opponent, multi=True):
+ assert info["opponent_attack_line"] is not None, f"no attack is detected at step {step}"
+ else:
+ assert info["opponent_attack_line"] is None, f"an attack is detected at step {step}"
+
+ if done :
+ total_nb_attacks = env._reward_helper.template_reward.total_nb_attacks
+ nb_last_attacks = env._reward_helper.template_reward.nb_last_attacks
+
+ assert nb_last_attacks == 2 # because blackout caused by attacks
+ assert total_nb_attacks == 2
+
+ cm_reward = env._reward_helper.template_reward.cumulated_reward
+ assert cm_reward == (DEFAULT_PARAMS_TRUSTSCORE[
+ 'reward_max_blackout']+DEFAULT_PARAMS_TRUSTSCORE[
+ 'reward_min_blackout'])/total_nb_attacks
+
+ cm_reward_min_ep, cm_reward_max_ep = env._reward_helper.template_reward._compute_min_max_reward(
+ total_nb_attacks,nb_last_attacks)
+                    # careful: attacks within the same time window before the blackout only count as a single weighted attack...
+ assert cm_reward_min_ep == DEFAULT_PARAMS_TRUSTSCORE['reward_min_blackout']
+ assert cm_reward_max_ep == DEFAULT_PARAMS_TRUSTSCORE['reward_max_blackout']
+
+ mean_score=(DEFAULT_PARAMS_TRUSTSCORE['min_score']+env._reward_helper.template_reward.max_score)/2
+ assert score == mean_score
+ break
+ else :
+ assert score == 0, f"error for step {step}: {score} vs 0"
+
+ def test_assistant_trust_score_blackout_2_lines_attacked_different_1_in_window_1_good_alert(self) -> None:
+ """
+        When 2 lines are attacked at different steps 3 and 6 and we raise 1 alert at step 5 on the second attack
+        and a blackout occurs at step 6, we expect a maximum score
+        and a cumulated reward equal to reward_max_blackout + reward_max_no_blackout.
+        The score is otherwise 0 at other time steps.
+ """
+ kwargs_opponent = dict(lines_attacked=[ATTACKED_LINE]+['48_53_141'],
+ duration=[1, 1],
+ steps_attack=[3, 6])
+ with make(self.env_nm,
+ test=True,
+ difficulty="1",
+ opponent_attack_cooldown=0,
+ opponent_attack_duration=99999,
+ opponent_budget_per_ts=1000,
+ opponent_init_budget=10000.,
+ opponent_action_class=PlayableAction,
+ opponent_class=TestOpponentMultiLines,
+ kwargs_opponent=kwargs_opponent,
+ reward_class=_AlertTrustScore(**DEFAULT_PARAMS_TRUSTSCORE),
+ _add_to_name="_tatsb2lad1iw1ga"
+ ) as env :
+ env.seed(0)
+ env.reset()
+ step = 0
+ for i in range(env.max_episode_duration()):
+ act = self.get_dn(env)
+ if i == 5 :
+ # opp attack "line 1" at step 6 so i = 5 => good alert
+ act = env.action_space({"raise_alert": [1]})
+ elif i == 6 :
+ act = self.get_blackout(env)
+ obs, score, done, info = env.step(act)
+ step += 1
+
+ if step in _get_steps_attack(kwargs_opponent, multi=True):
+ assert info["opponent_attack_line"] is not None, f"no attack is detected at step {step}"
+ else:
+ assert info["opponent_attack_line"] is None, f"an attack is detected at step {step}"
+
+ if done :
+ assert score == env._reward_helper.template_reward.max_score
+ assert done
+
+ total_nb_attacks = env._reward_helper.template_reward.total_nb_attacks
+ nb_last_attacks = env._reward_helper.template_reward.nb_last_attacks
+
+ assert nb_last_attacks == 1 # because blackout caused by attack
+ assert total_nb_attacks == 2
+
+ assert env._reward_helper.template_reward.cumulated_reward == (DEFAULT_PARAMS_TRUSTSCORE[
+ 'reward_max_blackout']+DEFAULT_PARAMS_TRUSTSCORE[
+ 'reward_max_no_blackout'])
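+                    # the alerted attack that caused the blackout contributes reward_max_blackout, while
+                    # the earlier attack, outside the alert window of the blackout, contributes
+                    # reward_max_no_blackout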
+
+ cm_reward_min_ep, cm_reward_max_ep = env._reward_helper.template_reward._compute_min_max_reward(
+ total_nb_attacks,nb_last_attacks)
+
+ assert cm_reward_min_ep == DEFAULT_PARAMS_TRUSTSCORE['reward_min_blackout']+DEFAULT_PARAMS_TRUSTSCORE['reward_min_no_blackout']
+ assert cm_reward_max_ep == DEFAULT_PARAMS_TRUSTSCORE['reward_max_blackout']+DEFAULT_PARAMS_TRUSTSCORE['reward_max_no_blackout']
+ break
+ else :
+ assert score == 0, f"error for step {step}: {score} vs 0"
+
+
+
+class TestRunnerAlertTrust(unittest.TestCase):
+ def setUp(self) -> None:
+ self.env_nm = os.path.join(
+ PATH_DATA_TEST, "l2rpn_idf_2023_with_alert"
+ )
+ self.env = make(self.env_nm, test=True, difficulty="1",
+ reward_class=_AlertTrustScore(**DEFAULT_PARAMS_TRUSTSCORE))
+ self.env.seed(0)
+ return super().setUp()
+
+ def tearDown(self) -> None:
+ self.env.close()
+ return super().tearDown()
+
+ def test_dn_agent(self):
+ obs = self.env.reset()
+ runner = Runner(**self.env.get_params_for_runner())
+ res = runner.run(nb_episode=1, episode_id=[0], max_iter=10, env_seeds=[0])
+ assert np.round(res[0][2], 3) == 1. # it got to the end
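+        # res[0] unpacks as (id_chron, name_chron, cum_reward, nb_time_step, max_ts), so index 2
+        # is the cumulated reward: reaching the end of the 10-step run without attack scores 1 here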
+
+ def test_simagent(self):
+ # simulate blackout but act donothing
+ obs = self.env.reset()
+
+ class SimAgent(BaseAgent):
+ def act(self, observation: BaseObservation, reward: float, done: bool = False) -> BaseAction:
+ go_act = self.action_space({"set_bus": {"generators_id": [(0, -1)]}})
+ simO, simr, simd, simi = obs.simulate(go_act)
+ simO, simr, simd, simi = obs.simulate(self.action_space())
+ return super().act(observation, reward, done)
+
+ runner = Runner(**self.env.get_params_for_runner(),
+ agentClass=SimAgent)
+ res = runner.run(nb_episode=1, episode_id=[0], max_iter=10, env_seeds=[0])
+ assert np.round(res[0][2], 3) == 1.
+
+ def test_episodeData(self):
+ obs = self.env.reset()
+ runner = Runner(**self.env.get_params_for_runner())
+ res = runner.run(nb_episode=1, episode_id=[0], max_iter=10, env_seeds=[0], add_detailed_output=True)
+ assert np.round(res[0][2], 3) == 1.
+ assert np.round(res[0][5].rewards[8]) == 1.
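+        # with add_detailed_output=True the result tuple gains a 6th element, the EpisodeData,
+        # whose per-step rewards are checked near the end of the 10-step run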
+
+ def test_with_save(self):
+ obs = self.env.reset()
+ runner = Runner(**self.env.get_params_for_runner())
+ with tempfile.TemporaryDirectory() as f:
+ res = runner.run(nb_episode=1, episode_id=[0], max_iter=10, env_seeds=[0],
+ path_save=f)
+ assert np.round(res[0][2], 3) == 1.
+ ep0, *_ = EpisodeData.list_episode(f)
+ ep = EpisodeData.from_disk(*ep0)
+ assert np.round(ep.rewards[8]) == 1.
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/grid2op/tests/test_attached_envs.py b/grid2op/tests/test_attached_envs.py
index 92c571088..028878c94 100644
--- a/grid2op/tests/test_attached_envs.py
+++ b/grid2op/tests/test_attached_envs.py
@@ -10,10 +10,8 @@
import unittest
import grid2op
-from grid2op.Action.PowerlineSetAction import PowerlineSetAction
-from grid2op.Action.PlayableAction import PlayableAction
-from grid2op.Observation.completeObservation import CompleteObservation
-from grid2op.Action.DontAct import DontAct
+from grid2op.Action import (PowerlineSetAction, PlayableAction, DontAct)
+from grid2op.Observation import CompleteObservation
from grid2op.Opponent import GeometricOpponent
import pdb
diff --git a/grid2op/tests/test_attached_envs_compat.py b/grid2op/tests/test_attached_envs_compat.py
index d09a51015..6c1850ab6 100644
--- a/grid2op/tests/test_attached_envs_compat.py
+++ b/grid2op/tests/test_attached_envs_compat.py
@@ -13,10 +13,8 @@
import numpy as np
from grid2op.Space import GridObjects
-from grid2op.Action.PowerlineSetAction import PowerlineSetAction
-from grid2op.Action.PlayableAction import PlayableAction
-from grid2op.Observation.completeObservation import CompleteObservation
-from grid2op.Action.DontAct import DontAct
+from grid2op.Action import PowerlineSetAction, DontAct, PlayableAction
+from grid2op.Observation import CompleteObservation
import pdb
diff --git a/grid2op/tests/test_baseline_alert.py b/grid2op/tests/test_baseline_alert.py
new file mode 100644
index 000000000..d780de995
--- /dev/null
+++ b/grid2op/tests/test_baseline_alert.py
@@ -0,0 +1,66 @@
+# Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
+# See AUTHORS.txt
+# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
+# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
+# you can obtain one at http://mozilla.org/MPL/2.0/.
+# SPDX-License-Identifier: MPL-2.0
+# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
+
+import unittest
+import numpy as np
+
+
+from grid2op import make
+from grid2op.Runner import Runner
+from grid2op.Agent.alertAgent import AlertAgent
+
+# test alert agent no blackout
+class TestAlertNoBlackout(unittest.TestCase):
+ def setUp(self) -> None:
+ self.env_nm = "l2rpn_idf_2023"
+
+ def test_alert_Agent(self) -> None:
+ pct_alerts = [100./23., 100./21., 300./21., 30., 50., 80.]
+ ref_alert_counts = {pct_alerts[0]: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ pct_alerts[1]: [0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ pct_alerts[2]: [0, 0, 0, 0, 0, 0, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ pct_alerts[3]: [0, 0, 1, 0, 3, 3, 3, 3, 3, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ pct_alerts[4]: [1, 2, 2, 0, 3, 3, 3, 3, 3, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
+ pct_alerts[5]: [1, 2, 2, 0, 3, 3, 3, 3, 3, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1],
+ }
+        # 0 alerts in the first case (no budget)
+        # at most one alert per step in the second case (budget for only 1)
+        # at most 3 alerts per step in the third case (budget for only 1), with the one from the first case still present
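+        # what is exercised below (an assumption based on the checks in this test): percentage_alert
+        # caps the share of alertable lines on which AlertAgent may raise alerts, which the
+        # alert-rate check at the end verifies against nb_time_step * nb_alertable_lines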
+ with make(
+ self.env_nm,
+ test=True,
+ difficulty="1"
+ ) as env:
+ for percentage_alert in pct_alerts:
+ env.seed(0)
+ env.reset()
+ my_agent = AlertAgent(env.action_space, percentage_alert=percentage_alert)
+                runner = Runner(**env.get_params_for_runner(), agentClass=None, agentInstance=my_agent)
+
+ res = runner.run(nb_episode=1, nb_process=1, path_save=None, agent_seeds=[0],env_seeds=[0], max_iter=3,
+ add_detailed_output=True)
+ id_chron, name_chron, cum_reward, nb_time_step, max_ts, episode_data = res[0]
+
+                # test that the number of alerts sent on each line is recovered
+                alerts_count = np.sum([obs.active_alert for obs in episode_data.observations[1:]],
+                                      axis=0)
+                assert np.all(alerts_count == ref_alert_counts[percentage_alert]), \
+                    f"for {percentage_alert}: {alerts_count} vs {ref_alert_counts[percentage_alert]}"
+
+ # test that we observe the expected alert rate
+ nb_alertable_lines = len(env.alertable_line_names)
+ ratio_alerts_step = np.sum(alerts_count) / (nb_time_step * nb_alertable_lines)
+ assert np.round(ratio_alerts_step, decimals=1) <= np.round(percentage_alert / 100., decimals=1)
+
+ # check that the alert agent does not do any intervention on the grid in this short time frame:
+ # like the "reconnect powerline" agent, it should only act to reconnect lines when allowed,
+ # which cannot happen in so few steps
+ has_action_impact = [act.impact_on_objects()['has_impact'] for act in episode_data.actions]
+ assert not np.any(has_action_impact)
+
+
+if __name__ == "__main__":
+ unittest.main()
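+
+# A minimal standalone sketch (illustrative, outside the test suite), assuming the
+# same "l2rpn_idf_2023" test environment:
+#
+# env = make("l2rpn_idf_2023", test=True)
+# my_agent = AlertAgent(env.action_space, percentage_alert=30.)
+# obs = env.reset()
+# act = my_agent.act(obs, reward=0., done=False)
+# # the action then carries the alerts chosen under the ~30% budget (its
+# # `raise_alert` vector, assuming that attribute as exposed in this grid2op version)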
diff --git a/grid2op/tests/test_educpp_backend.py b/grid2op/tests/test_educpp_backend.py
index 0876c5651..fb1916b4f 100644
--- a/grid2op/tests/test_educpp_backend.py
+++ b/grid2op/tests/test_educpp_backend.py
@@ -13,7 +13,7 @@
import unittest
import grid2op
-from grid2op.Backend.EducPandaPowerBackend import EducPandaPowerBackend
+from grid2op.Backend.educPandaPowerBackend import EducPandaPowerBackend
import pdb
class EducPPTester(unittest.TestCase):
diff --git a/grid2op/tests/test_highres_sim_counter_in_scores.py b/grid2op/tests/test_highres_sim_counter_in_scores.py
new file mode 100644
index 000000000..dae7ae611
--- /dev/null
+++ b/grid2op/tests/test_highres_sim_counter_in_scores.py
@@ -0,0 +1,120 @@
+# Copyright (c) 2023, RTE (https://www.rte-france.com)
+# See AUTHORS.txt
+# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
+# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
+# you can obtain one at http://mozilla.org/MPL/2.0/.
+# SPDX-License-Identifier: MPL-2.0
+# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
+
+import warnings
+import numpy as np
+import unittest
+
+import grid2op
+from grid2op.Action import ActionSpace, BaseAction
+from grid2op.utils import ScoreL2RPN2023, ScoreL2RPN2022, ScoreICAPS2021, ScoreL2RPN2020
+from grid2op.Observation import BaseObservation
+from grid2op.Agent import DoNothingAgent, BaseAgent
+from grid2op.Chronics import FromHandlers
+from grid2op.Chronics.handlers import CSVHandler, PerfectForecastHandler, DoNothingHandler
+
+
+class _TesterSimulateAgent(BaseAgent):
+ def act(self, observation: BaseObservation, reward: float, done: bool = False) -> BaseAction:
+ observation.simulate(self.action_space())
+ observation.simulate(self.action_space())
+ return super().act(observation, reward, done)
+
+
+class TestHighResSimCountInScore:
+ def _score_fun(self):
+ raise RuntimeError()
+
+ def _env_name(self):
+ return "l2rpn_case14_sandbox"
+
+ def setUp(self) -> None:
+ with warnings.catch_warnings():
+ warnings.filterwarnings("ignore")
+ self.env = grid2op.make(self._env_name(),
+ test=True,
+ data_feeding_kwargs={"gridvalueClass": FromHandlers,
+ "gen_p_handler": CSVHandler("prod_p"),
+ "load_p_handler": CSVHandler("load_p"),
+ "gen_v_handler": DoNothingHandler("gen_v"),
+ "load_q_handler": CSVHandler("load_q"),
+ "h_forecast": (5,),
+ "gen_p_for_handler": PerfectForecastHandler("prod_p_forecasted", quiet_warnings=True),
+ # "gen_v_for_handler": PerfectForecastHandler("prod_v_forecasted", quiet_warnings=True),
+ "load_p_for_handler": PerfectForecastHandler("load_p_forecasted", quiet_warnings=True),
+ "load_q_for_handler": PerfectForecastHandler("load_q_forecasted", quiet_warnings=True),
+ },)
+ self.env.set_max_iter(20)
+ params = self.env.parameters
+ params.NO_OVERFLOW_DISCONNECTION = True
+ params.LIMIT_INFEASIBLE_CURTAILMENT_STORAGE_ACTION = True
+ self.seed = 0
+ self.scen_id = 0
+ self.nb_scenario = 2
+ self.max_iter = 10
+
+ def tearDown(self) -> None:
+ self.env.close()
+ return super().tearDown()
+
+ def test_score_helper(self):
+ """basic tests for ScoreL2RPN2023 class"""
+ self.env.reset()
+ my_score = self._score_fun()(
+ self.env,
+ nb_scenario=self.nb_scenario,
+ env_seeds=[0 for _ in range(self.nb_scenario)],
+ agent_seeds=[0 for _ in range(self.nb_scenario)],
+ max_step=self.max_iter,
+ add_nb_highres_sim=True)
+ try:
+ # test do nothing indeed gets 0 high resolution simulations
+ res_dn = my_score.get(DoNothingAgent(self.env.action_space))
+ assert len(res_dn) == 4
+ all_scores, ts_survived, total_ts, nb_highres_sim = res_dn
+ assert nb_highres_sim == [0] * self.nb_scenario, f"do nothing does not have 0 but {nb_highres_sim}"
+
+ # test the simulating agent indeed gets 2 simulations per step
+ res_tester = my_score.get(_TesterSimulateAgent(self.env.action_space))
+ assert len(res_tester) == 4
+ all_scores, ts_survived, total_ts, nb_highres_sim = res_tester
+ assert nb_highres_sim == [2 * self.max_iter] * self.nb_scenario, f"_TesterSimulateAgent does not have 2x but {nb_highres_sim}"
+
+ finally:
+ my_score.clear_all()
+
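+ # Sketch of the expected contract (under the assumptions of this test): with
+ # `add_nb_highres_sim=True`, `my_score.get(agent)` returns
+ # (all_scores, ts_survived, total_ts, nb_highres_sim), where nb_highres_sim[i]
+ # counts the agent's calls to `obs.simulate` (and other high resolution
+ # simulators) on scenario i.
+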
+class TestHighResSimCountInScore2023(TestHighResSimCountInScore, unittest.TestCase):
+ def _score_fun(self):
+ return ScoreL2RPN2023
+
+ def _env_name(self):
+ return "l2rpn_idf_2023"
+
+class TestHighResSimCountInScore2022(TestHighResSimCountInScore, unittest.TestCase):
+ def _score_fun(self):
+ return ScoreL2RPN2022
+
+ def _env_name(self):
+ return "l2rpn_case14_sandbox"
+
+class TestHighResSimCountInScore2021(TestHighResSimCountInScore, unittest.TestCase):
+ def _score_fun(self):
+ return ScoreICAPS2021
+
+ def _env_name(self):
+ return "l2rpn_icaps_2021"
+
+class TestHighResSimCountInScore2020(TestHighResSimCountInScore, unittest.TestCase):
+ def _score_fun(self):
+ return ScoreL2RPN2020
+
+ def _env_name(self):
+ return "l2rpn_case14_sandbox"
+
+if __name__ == "__main__":
+ unittest.main()
\ No newline at end of file
diff --git a/grid2op/tests/test_issue_494.py b/grid2op/tests/test_issue_494.py
new file mode 100644
index 000000000..e6d9ee1b3
--- /dev/null
+++ b/grid2op/tests/test_issue_494.py
@@ -0,0 +1,36 @@
+# Copyright (c) 2023, RTE (https://www.rte-france.com)
+# See AUTHORS.txt
+# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
+# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
+# you can obtain one at http://mozilla.org/MPL/2.0/.
+# SPDX-License-Identifier: MPL-2.0
+# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
+
+import numpy as np
+import grid2op
+import unittest
+import warnings
+import pdb
+
+
+class Issue494Tester(unittest.TestCase):
+ def setUp(self) -> None:
+ with warnings.catch_warnings():
+ warnings.filterwarnings("ignore")
+ self.env = grid2op.make("l2rpn_idf_2023", test=True)
+ self.env.seed(0)
+ self.env.set_id(0)
+
+ def tearDown(self) -> None:
+ self.env.close()
+ return super().tearDown()
+
+ def test_act_legal(self):
+ obs = self.env.reset()
+ for sub_id in [24, 98, 100]:
+ obs, reward, done, info = self.env.step(self.env.action_space({"set_bus": {"substations_id": [(sub_id, np.ones(type(obs).sub_info[sub_id], dtype=int))]}}))
+ assert not info["exception"], f'for {sub_id=} {info["exception"]} vs []'
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/grid2op/tests/test_l2rpn_idf_2023.py b/grid2op/tests/test_l2rpn_idf_2023.py
index b8627b3ee..b8aad0cfe 100644
--- a/grid2op/tests/test_l2rpn_idf_2023.py
+++ b/grid2op/tests/test_l2rpn_idf_2023.py
@@ -18,7 +18,7 @@
import pdb
-class L2RPNIDF2023Tester(unittest.TestCase):
+class TestL2RPNIDF2023Tester(unittest.TestCase):
def setUp(self) -> None:
with warnings.catch_warnings():
warnings.filterwarnings("ignore")
diff --git a/grid2op/tests/test_noisy_obs.py b/grid2op/tests/test_noisy_obs.py
index 3dda12c68..f78edd668 100644
--- a/grid2op/tests/test_noisy_obs.py
+++ b/grid2op/tests/test_noisy_obs.py
@@ -13,8 +13,7 @@
import grid2op
from grid2op.Runner import Runner
-from grid2op.Observation import CompleteObservation
-from grid2op.Observation import NoisyObservation
+from grid2op.Observation import (CompleteObservation, NoisyObservation)
class TestNoisy(unittest.TestCase):
diff --git a/grid2op/tests/test_redisp_extreme.py b/grid2op/tests/test_redisp_extreme.py
index d278c19f5..9662a4f52 100644
--- a/grid2op/tests/test_redisp_extreme.py
+++ b/grid2op/tests/test_redisp_extreme.py
@@ -10,7 +10,7 @@
import os
import numpy as np
import grid2op
-from grid2op.Action.PlayableAction import PlayableAction
+from grid2op.Action.playableAction import PlayableAction
from grid2op.tests.helper_path_test import *
import unittest
diff --git a/grid2op/tests/test_score_idf_2023.py b/grid2op/tests/test_score_idf_2023.py
index a298c5545..8fbcbbb00 100644
--- a/grid2op/tests/test_score_idf_2023.py
+++ b/grid2op/tests/test_score_idf_2023.py
@@ -11,60 +11,18 @@
import unittest
import grid2op
-from grid2op.Action import ActionSpace, BaseAction
from grid2op.utils import ScoreL2RPN2023
-from grid2op.Observation import BaseObservation
-from grid2op.Agent.doNothing import DoNothingAgent, BaseAgent
-from grid2op.Chronics import FromHandlers
-from grid2op.Chronics.handlers import CSVHandler, PerfectForecastHandler
-from grid2op.Reward import _NewRenewableSourcesUsageScore
+from grid2op.Agent.doNothing import DoNothingAgent
-
-class CurtailTrackerAgent(BaseAgent):
- def __init__(self, action_space, gen_renewable, gen_pmax, curtail_level=1.):
- super().__init__(action_space)
- self.gen_renewable = gen_renewable
- self.gen_pmax = gen_pmax[gen_renewable]
- self.curtail_level = curtail_level
-
- def act(self, obs: BaseObservation, reward, done):
- curtail_target = self.curtail_level * obs.gen_p[self.gen_renewable] / self.gen_pmax
- act = self.action_space(
- {"curtail": [(el, ratio) for el, ratio in zip(np.arange(len(self.gen_renewable))[self.gen_renewable], curtail_target)]}
- )
- return act
-
-class CurtailAgent(BaseAgent):
- def __init__(self, action_space: ActionSpace, curtail_level=1.):
- self.curtail_level = curtail_level
- super().__init__(action_space)
-
- def act(self, observation: BaseObservation, reward: float, done: bool = False) -> BaseAction:
- next_gen_p = observation.simulate(self.action_space())[0].gen_p_before_curtail
- curtail = self.curtail_level * next_gen_p / observation.gen_pmax
- curtail[~observation.gen_renewable] = -1
- act = self.action_space({"curtail": curtail})
- return act
-
class TestScoreL2RPN2023(unittest.TestCase):
-
+
def setUp(self) -> None:
- env_name = "l2rpn_case14_sandbox"
+ env_name = "l2rpn_idf_2023"
with warnings.catch_warnings():
warnings.filterwarnings("ignore")
self.env = grid2op.make(env_name,
- test=True,
- data_feeding_kwargs={"gridvalueClass": FromHandlers,
- "gen_p_handler": CSVHandler("prod_p"),
- "load_p_handler": CSVHandler("load_p"),
- "gen_v_handler": CSVHandler("prod_v"),
- "load_q_handler": CSVHandler("load_q"),
- "h_forecast": (5,),
- "gen_p_for_handler": PerfectForecastHandler("prod_p_forecasted", quiet_warnings=True),
- "gen_v_for_handler": PerfectForecastHandler("prod_v_forecasted", quiet_warnings=True),
- "load_p_for_handler": PerfectForecastHandler("load_p_forecasted", quiet_warnings=True),
- "load_q_for_handler": PerfectForecastHandler("load_q_forecasted", quiet_warnings=True),
- },)
+ test=True)
+
self.env.set_max_iter(20)
params = self.env.parameters
params.NO_OVERFLOW_DISCONNECTION = True
@@ -73,151 +31,42 @@ def setUp(self) -> None:
self.scen_id = 0
self.nb_scenario = 2
self.max_iter = 10
-
+
def tearDown(self) -> None:
self.env.close()
return super().tearDown()
-
+
+
def test_score_helper(self):
"""basic tests for ScoreL2RPN2023 class"""
- self.env.reset()
+ self.env.reset()
+ my_score = ScoreL2RPN2023(
+ self.env,
+ nb_scenario=self.nb_scenario,
+ env_seeds=[0 for _ in range(self.nb_scenario)],
+ agent_seeds=[0 for _ in range(self.nb_scenario)],
+ max_step=self.max_iter,
+ weight_op_score=0.6,
+ weight_assistant_score=0.25,
+ weight_nres_score=0.15,
+ scale_nres_score=100,
+ scale_assistant_score=100,
+ min_nres_score=-100.,
+ min_assistant_score=-300)
try:
- my_score = ScoreL2RPN2023(
- self.env,
- nb_scenario=self.nb_scenario,
- env_seeds=[0 for _ in range(self.nb_scenario)],
- agent_seeds=[0 for _ in range(self.nb_scenario)],
- max_step=self.max_iter,
- weight_op_score=0.8,
- weight_assistant_score=0,
- weight_nres_score=0.2,
- scale_nres_score=100,
- scale_assistant_score=100,
- min_nres_score=-300.)
-
# test do nothing indeed gets 100.
res_dn = my_score.get(DoNothingAgent(self.env.action_space))
- for scen_id, (ep_score, op_score, nres_score, assistant_confidence_score, assistant_cost_score) in enumerate(res_dn[0]):
+ for scen_id, (ep_score, op_score, nres_score, assistant_score) in enumerate(res_dn[0]):
assert nres_score == 100.
- assert ep_score == 0.8 * op_score + 0.2 * nres_score
-
- # now test that the score decrease fast "at beginning" and slower "at the end"
- # ie from 1. to 0.95 bigger difference than from 0.8 to 0.7
- res_agent0 = my_score.get(CurtailTrackerAgent(self.env.action_space,
- gen_renewable = self.env.gen_renewable,
- gen_pmax=self.env.gen_pmax,
- curtail_level = 0.95))
- # assert np.allclose(res_agent0[0][0][2], 81.83611011377577)
- # assert np.allclose(res_agent0[0][1][2], 68.10026022372575)
- assert np.allclose(res_agent0[0][0][0], 0.8 * res_agent0[0][0][1] + 0.2 * res_agent0[0][0][2])
- assert np.allclose(res_agent0[0][0][2], 16.73128726588182)
- assert np.allclose(res_agent0[0][1][2], -26.02070223995034)
-
- res_agent1 = my_score.get(CurtailTrackerAgent(self.env.action_space,
- gen_renewable = self.env.gen_renewable,
- gen_pmax=self.env.gen_pmax,
- curtail_level = 0.9))
- # assert np.allclose(res_agent1[0][0][2], 56.256863965501466)
- # assert np.allclose(res_agent1[0][1][2], 43.370607328810415)
- assert np.allclose(res_agent1[0][0][2], -49.61104170080321)
- assert np.allclose(res_agent1[0][1][2], -78.00216266500183)
-
- # decrease
- assert 100. - res_agent0[0][0][2] >= res_agent0[0][0][2] - res_agent1[0][0][2]
- assert 100. - res_agent0[0][1][2] >= res_agent0[0][1][2] - res_agent1[0][1][2]
-
- res_agent2 = my_score.get(CurtailTrackerAgent(self.env.action_space,
- gen_renewable = self.env.gen_renewable,
- gen_pmax=self.env.gen_pmax,
- curtail_level = 0.8))
- assert np.allclose(res_agent2[0][0][2], -127.62213025108333)
- assert np.allclose(res_agent2[0][1][2], -143.83405253996978)
- # decrease
- assert 100. - res_agent1[0][0][2] >= res_agent1[0][0][2] - res_agent2[0][0][2]
- assert 100. - res_agent1[0][1][2] >= res_agent1[0][1][2] - res_agent2[0][1][2]
-
- res_agent3 = my_score.get(CurtailTrackerAgent(self.env.action_space,
- gen_renewable = self.env.gen_renewable,
- gen_pmax=self.env.gen_pmax,
- curtail_level = 0.7))
- assert np.allclose(res_agent3[0][0][2], -169.9519401162611)
- assert np.allclose(res_agent3[0][1][2], -179.45065441917586)
- assert res_agent1[0][0][2] - res_agent2[0][0][2] >= res_agent2[0][0][2] - res_agent2[0][0][2]
- assert res_agent1[0][1][2] - res_agent2[0][1][2] >= res_agent2[0][1][2] - res_agent2[0][1][2]
- finally:
- my_score.clear_all()
+ assert ep_score == 0.6 * op_score + 0.15 * nres_score + 0.25 * assistant_score
+ assert assistant_score == 100.  # no blackout and no disconnections
+ assert op_score == 0
+
- def test_min_score(self):
- """test the score does not go bellow the minimum in input"""
- try:
- self.env.reset()
- my_score = ScoreL2RPN2023(
- self.env,
- nb_scenario=self.nb_scenario,
- env_seeds=[0 for _ in range(self.nb_scenario)],
- agent_seeds=[0 for _ in range(self.nb_scenario)],
- max_step=self.max_iter,
- weight_op_score=0.8,
- weight_assistant_score=0,
- weight_nres_score=0.2,
- scale_nres_score=100,
- scale_assistant_score=100,
- min_nres_score=-100.)
-
- res_agent3 = my_score.get(CurtailTrackerAgent(self.env.action_space,
- gen_renewable = self.env.gen_renewable,
- gen_pmax=self.env.gen_pmax,
- curtail_level = 0.7))
- # assert np.allclose(res_agent3[0][0][2], -169.9519401162611)
- # assert np.allclose(res_agent3[0][1][2], -179.45065441917586)
- assert np.allclose(res_agent3[0][0][2], -100.)
- assert np.allclose(res_agent3[0][1][2], -100.)
- finally:
- my_score.clear_all()
-
- def test_spec(self):
- """ spec are: 100pts for 0 curtailment, 0 pts for 80% renewable (20% curtailment) and -100 pts for 50% renewable"""
-
- # test function without actual data
- assert _NewRenewableSourcesUsageScore._surlinear_func_curtailment(100.) == 1.
- assert _NewRenewableSourcesUsageScore._surlinear_func_curtailment(80.) == 0.
- assert _NewRenewableSourcesUsageScore._surlinear_func_curtailment(50.) == -1.
- assert _NewRenewableSourcesUsageScore._surlinear_func_curtailment(0.) < _NewRenewableSourcesUsageScore._surlinear_func_curtailment(50.)
-
- try:
- # now test with "real" data
- my_score = ScoreL2RPN2023(
- self.env,
- nb_scenario=self.nb_scenario,
- env_seeds=[0 for _ in range(self.nb_scenario)],
- agent_seeds=[0 for _ in range(self.nb_scenario)],
- max_step=self.max_iter,
- weight_op_score=0.8,
- weight_assistant_score=0,
- weight_nres_score=0.2)
- tol = 3e-5
- # test do nothing indeed gets 100.
- res_dn = my_score.get(DoNothingAgent(self.env.action_space))
- for scen_id, (ep_score, op_score, nres_score, assistant_confidence_score, assistant_cost_score) in enumerate(res_dn[0]):
- assert abs(nres_score - 100.) <= tol
-
- # test 80% gets indeed close to 0
- res_80 = my_score.get(CurtailAgent(self.env.action_space, 0.8))
- for scen_id, (ep_score, op_score, nres_score, assistant_confidence_score, assistant_cost_score) in enumerate(res_80[0]):
- assert abs(nres_score) <= tol
-
- # test 50% gets indeed close to -100
- res_50 = my_score.get(CurtailAgent(self.env.action_space, 0.5))
- for scen_id, (ep_score, op_score, nres_score, assistant_confidence_score, assistant_cost_score) in enumerate(res_50[0]):
- assert abs(nres_score + 100.) <= tol
-
- # test bellow 50% still gets close to -100
- res_30 = my_score.get(CurtailAgent(self.env.action_space, 0.3))
- for scen_id, (ep_score, op_score, nres_score, assistant_confidence_score, assistant_cost_score) in enumerate(res_30[0]):
- assert abs(nres_score + 100.) <= tol
finally:
my_score.clear_all()
-
+
+
if __name__ == "__main__":
unittest.main()
\ No newline at end of file
diff --git a/grid2op/tests/test_score_idf_2023_assistant.py b/grid2op/tests/test_score_idf_2023_assistant.py
new file mode 100644
index 000000000..4de9fee57
--- /dev/null
+++ b/grid2op/tests/test_score_idf_2023_assistant.py
@@ -0,0 +1,142 @@
+# Copyright (c) 2023, RTE (https://www.rte-france.com)
+# See AUTHORS.txt
+# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
+# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
+# you can obtain one at http://mozilla.org/MPL/2.0/.
+# SPDX-License-Identifier: MPL-2.0
+# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
+
+import warnings
+import numpy as np
+import unittest
+
+import grid2op
+from grid2op.utils import ScoreL2RPN2023
+from grid2op.Agent.doNothing import DoNothingAgent, BaseAgent
+from grid2op.Observation import BaseObservation
+from grid2op.Action import BaseAction
+from grid2op.Chronics import FromHandlers
+from grid2op.Chronics.handlers import CSVHandler, PerfectForecastHandler
+from _aux_opponent_for_test_alerts import TestOpponent, _get_blackout
+
+class Alert_Blackout_Agent(BaseAgent):
+ def __init__(self, action_space, do_Alerts=False, blackout_step=None):
+ super().__init__(action_space)
+ self.do_Alerts = do_Alerts
+ self.blackout_step = blackout_step
+
+ def act(self, observation: BaseObservation, reward: float, done: bool = False) -> BaseAction:
+ act = self.action_space({})
+
+ if self.do_Alerts:
+ # we don't know which line will get attacked, so we raise all alerts
+ # to be sure an alert is raised for the line actually attacked
+ act += self.action_space({"raise_alert": [i for i in range(len(observation.alertable_line_ids))]})
+
+ if (self.blackout_step is not None) and (observation.current_step == self.blackout_step):
+ act += _get_blackout(self.action_space)
+
+ return act
+
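+# For example (illustrative): Alert_Blackout_Agent(env.action_space, do_Alerts=True, blackout_step=15)
+# raises an alert on every alertable line at each step and forces a blackout at step 15.
+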
+class TestScoreL2RPN2023Assist(unittest.TestCase):
+ """test the "assistant" part of the l2rpn_idf_2023"""
+ def setUp(self) -> None:
+ env_name = "l2rpn_idf_2023"
+ ATTACKED_LINE = "48_50_136"
+
+ with warnings.catch_warnings():
+ warnings.filterwarnings("ignore")
+
+ self.env = grid2op.make(env_name,
+ test=True)
+
+ self.env.set_max_iter(30)
+ params = self.env.parameters
+ params.NO_OVERFLOW_DISCONNECTION = True
+ params.LIMIT_INFEASIBLE_CURTAILMENT_STORAGE_ACTION = True
+ self.seed = 0
+ self.scen_id = 0
+ self.nb_scenario = 2
+ self.max_iter = 30  # if attacks are at timestep 13, we need at least 26 timesteps to get beyond the alert time window and see a relevant score in case of no blackout
+
+ def tearDown(self) -> None:
+ self.env.close()
+ return super().tearDown()
+
+
+ def test_score_helper(self):
+ """basic tests for ScoreL2RPN2023 class for assistant score with nres score set to 0"
+ With those seeds, we observe an attack in both episodes at timestep 13"""
+ self.env.reset()
+ my_score = ScoreL2RPN2023(
+ self.env,
+ nb_scenario=self.nb_scenario,
+ env_seeds=[0 for _ in range(self.nb_scenario)],
+ agent_seeds=[0 for _ in range(self.nb_scenario)],
+ max_step=self.max_iter,
+ weight_op_score=0.6,
+ weight_assistant_score=0.4,
+ weight_nres_score=0.,
+ scale_nres_score=100,
+ scale_assistant_score=100,
+ min_nres_score=-100.,
+ min_assistant_score=-300)
+ try:
+
+ # test do nothing indeed gets 100.
+ res_dn = my_score.get(DoNothingAgent(self.env.action_space))
+ for scen_id, (ep_score, op_score, nres_score, assistant_score) in enumerate(res_dn[0]):
+ assert assistant_score == 100.  # no blackout and no disconnections
+ assert ep_score == 0.6 * op_score + 0.4 * assistant_score
+
+ # the agent raises alerts for the attacks although it should not, since no blackout occurs.
+ # With the L2RPN IDF score parametrization, this gives a score of -300
+ res_agent = my_score.get(Alert_Blackout_Agent(self.env.action_space, do_Alerts=True))  # attacks are at timestep 13 for both scenarios with those seeds
+ for scen_id, (ep_score, op_score, nres_score, assistant_score) in enumerate(res_agent[0]):
+ assert assistant_score == -300
+ assert ep_score == 0.6 * op_score + 0.4 * assistant_score
+
+ # the agent raises no alert for the attacks and gets no blackout. With the L2RPN IDF
+ # score parametrization, this gives a score of 100
+ res_agent = my_score.get(Alert_Blackout_Agent(self.env.action_space, do_Alerts=False))  # attacks are at timestep 13 for both scenarios with those seeds
+ for scen_id, (ep_score, op_score, nres_score, assistant_score) in enumerate(res_agent[0]):
+ assert assistant_score == 100
+ assert ep_score == 0.6 * op_score + 0.4 * assistant_score
+
+ # the agent raises alerts for the attacks and gets a blackout. With the L2RPN IDF
+ # score parametrization, this gives a score of 100
+ res_agent = my_score.get(Alert_Blackout_Agent(self.env.action_space, do_Alerts=True, blackout_step=15))  # attacks are at timestep 13 for both scenarios with those seeds
+ for scen_id, (ep_score, op_score, nres_score, assistant_score) in enumerate(res_agent[0]):
+ assert assistant_score == 100
+ assert ep_score == 0.6 * op_score + 0.4 * assistant_score
+
+
+ finally:
+ my_score.clear_all()
+
+ def test_min_score(self):
+ """test the score does not go bellow the minimum in input
+ With those seeds, we observe an attack in both episodes at timestep 13"""
+
+ try:
+ self.env.reset()
+ my_score = ScoreL2RPN2023(
+ self.env,
+ nb_scenario=self.nb_scenario,
+ env_seeds=[0 for _ in range(self.nb_scenario)],
+ agent_seeds=[0 for _ in range(self.nb_scenario)],
+ max_step=self.max_iter,
+ weight_op_score=0.8,
+ weight_assistant_score=0,
+ weight_nres_score=0.2,
+ scale_nres_score=100,
+ scale_assistant_score=100,
+ min_nres_score=-100.,
+ min_assistant_score=-300)
+
+ res_agent = my_score.get(Alert_Blackout_Agent(self.env.action_space, blackout_step=14))
+ for scen_id, (ep_score, op_score, nres_score, assistant_score) in enumerate(res_agent[0]):
+ assert assistant_score == -300  # gets the minimum score because of a blackout after an attack with no alert raised
+ finally:
+ my_score.clear_all()
+
+
+if __name__ == "__main__":
+ unittest.main()
\ No newline at end of file
diff --git a/grid2op/tests/test_score_idf_2023_nres.py b/grid2op/tests/test_score_idf_2023_nres.py
new file mode 100644
index 000000000..61537814f
--- /dev/null
+++ b/grid2op/tests/test_score_idf_2023_nres.py
@@ -0,0 +1,223 @@
+# Copyright (c) 2023, RTE (https://www.rte-france.com)
+# See AUTHORS.txt
+# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
+# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
+# you can obtain one at http://mozilla.org/MPL/2.0/.
+# SPDX-License-Identifier: MPL-2.0
+# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
+
+import warnings
+import numpy as np
+import unittest
+
+import grid2op
+from grid2op.Action import ActionSpace, BaseAction
+from grid2op.utils import ScoreL2RPN2023
+from grid2op.Observation import BaseObservation
+from grid2op.Agent.doNothing import DoNothingAgent, BaseAgent
+from grid2op.Chronics import FromHandlers
+from grid2op.Chronics.handlers import CSVHandler, PerfectForecastHandler
+from grid2op.Reward import _NewRenewableSourcesUsageScore
+
+
+class CurtailTrackerAgent(BaseAgent):
+ def __init__(self, action_space, gen_renewable, gen_pmax, curtail_level=1.):
+ super().__init__(action_space)
+ self.gen_renewable = gen_renewable
+ self.gen_pmax = gen_pmax[gen_renewable]
+ self.curtail_level = curtail_level
+
+ def act(self, obs: BaseObservation, reward, done):
+ curtail_target = self.curtail_level * obs.gen_p[self.gen_renewable] / self.gen_pmax
+ act = self.action_space(
+ {"curtail": [(el, ratio) for el, ratio in zip(np.arange(len(self.gen_renewable))[self.gen_renewable], curtail_target)]}
+ )
+ return act
+
+class CurtailAgent(BaseAgent):
+ def __init__(self, action_space: ActionSpace, curtail_level=1.):
+ self.curtail_level = curtail_level
+ super().__init__(action_space)
+
+ def act(self, observation: BaseObservation, reward: float, done: bool = False) -> BaseAction:
+ next_gen_p = observation.simulate(self.action_space())[0].gen_p_before_curtail
+ curtail = self.curtail_level * next_gen_p / observation.gen_pmax
+ curtail[~observation.gen_renewable] = -1
+ act = self.action_space({"curtail": curtail})
+ return act
+
+class TestScoreL2RPN2023NRES(unittest.TestCase):
+ """test the "nres" part of the l2rpn_idf_2023"""
+ def setUp(self) -> None:
+ env_name = "l2rpn_case14_sandbox"
+ with warnings.catch_warnings():
+ warnings.filterwarnings("ignore")
+ self.env = grid2op.make(env_name,
+ test=True,
+ data_feeding_kwargs={"gridvalueClass": FromHandlers,
+ "gen_p_handler": CSVHandler("prod_p"),
+ "load_p_handler": CSVHandler("load_p"),
+ "gen_v_handler": CSVHandler("prod_v"),
+ "load_q_handler": CSVHandler("load_q"),
+ "h_forecast": (5,),
+ "gen_p_for_handler": PerfectForecastHandler("prod_p_forecasted", quiet_warnings=True),
+ "gen_v_for_handler": PerfectForecastHandler("prod_v_forecasted", quiet_warnings=True),
+ "load_p_for_handler": PerfectForecastHandler("load_p_forecasted", quiet_warnings=True),
+ "load_q_for_handler": PerfectForecastHandler("load_q_forecasted", quiet_warnings=True),
+ },)
+ self.env.set_max_iter(20)
+ params = self.env.parameters
+ params.NO_OVERFLOW_DISCONNECTION = True
+ params.LIMIT_INFEASIBLE_CURTAILMENT_STORAGE_ACTION = True
+ self.seed = 0
+ self.scen_id = 0
+ self.nb_scenario = 2
+ self.max_iter = 10
+
+ def tearDown(self) -> None:
+ self.env.close()
+ return super().tearDown()
+
+ def test_score_helper(self):
+ """basic tests for ScoreL2RPN2023 class"""
+ self.env.reset()
+ try:
+ my_score = ScoreL2RPN2023(
+ self.env,
+ nb_scenario=self.nb_scenario,
+ env_seeds=[0 for _ in range(self.nb_scenario)],
+ agent_seeds=[0 for _ in range(self.nb_scenario)],
+ max_step=self.max_iter,
+ weight_op_score=0.8,
+ weight_assistant_score=0,
+ weight_nres_score=0.2,
+ scale_nres_score=100,
+ scale_assistant_score=100,
+ min_nres_score=-300.)
+
+ # test do nothing indeed gets 100.
+ res_dn = my_score.get(DoNothingAgent(self.env.action_space))
+ for scen_id, (ep_score, op_score, nres_score, assistant_score) in enumerate(res_dn[0]):
+ assert nres_score == 100.
+ assert ep_score == 0.8 * op_score + 0.2 * nres_score
+
+ # now test that the score decreases fast "at the beginning" and slower "at the end",
+ # i.e. going from 1. to 0.95 makes a bigger difference than going from 0.8 to 0.7
+ res_agent0 = my_score.get(CurtailTrackerAgent(self.env.action_space,
+ gen_renewable = self.env.gen_renewable,
+ gen_pmax=self.env.gen_pmax,
+ curtail_level = 0.95))
+ # assert np.allclose(res_agent0[0][0][2], 81.83611011377577)
+ # assert np.allclose(res_agent0[0][1][2], 68.10026022372575)
+ assert np.allclose(res_agent0[0][0][0], 0.8 * res_agent0[0][0][1] + 0.2 * res_agent0[0][0][2])
+ assert np.allclose(res_agent0[0][0][2], 16.73128726588182)
+ assert np.allclose(res_agent0[0][1][2], -26.02070223995034)
+
+ res_agent1 = my_score.get(CurtailTrackerAgent(self.env.action_space,
+ gen_renewable = self.env.gen_renewable,
+ gen_pmax=self.env.gen_pmax,
+ curtail_level = 0.9))
+ # assert np.allclose(res_agent1[0][0][2], 56.256863965501466)
+ # assert np.allclose(res_agent1[0][1][2], 43.370607328810415)
+ assert np.allclose(res_agent1[0][0][2], -49.61104170080321)
+ assert np.allclose(res_agent1[0][1][2], -78.00216266500183)
+
+ # decrease
+ assert 100. - res_agent0[0][0][2] >= res_agent0[0][0][2] - res_agent1[0][0][2]
+ assert 100. - res_agent0[0][1][2] >= res_agent0[0][1][2] - res_agent1[0][1][2]
+
+ res_agent2 = my_score.get(CurtailTrackerAgent(self.env.action_space,
+ gen_renewable = self.env.gen_renewable,
+ gen_pmax=self.env.gen_pmax,
+ curtail_level = 0.8))
+ assert np.allclose(res_agent2[0][0][2], -127.62213025108333)
+ assert np.allclose(res_agent2[0][1][2], -143.83405253996978)
+ # decrease
+ assert 100. - res_agent1[0][0][2] >= res_agent1[0][0][2] - res_agent2[0][0][2]
+ assert 100. - res_agent1[0][1][2] >= res_agent1[0][1][2] - res_agent2[0][1][2]
+
+ res_agent3 = my_score.get(CurtailTrackerAgent(self.env.action_space,
+ gen_renewable = self.env.gen_renewable,
+ gen_pmax=self.env.gen_pmax,
+ curtail_level = 0.7))
+ assert np.allclose(res_agent3[0][0][2], -169.9519401162611)
+ assert np.allclose(res_agent3[0][1][2], -179.45065441917586)
+ # decrease
+ assert res_agent1[0][0][2] - res_agent2[0][0][2] >= res_agent2[0][0][2] - res_agent3[0][0][2]
+ assert res_agent1[0][1][2] - res_agent2[0][1][2] >= res_agent2[0][1][2] - res_agent3[0][1][2]
+ finally:
+ my_score.clear_all()
+
+ def test_min_score(self):
+ """test the score does not go bellow the minimum in input"""
+ try:
+ self.env.reset()
+ my_score = ScoreL2RPN2023(
+ self.env,
+ nb_scenario=self.nb_scenario,
+ env_seeds=[0 for _ in range(self.nb_scenario)],
+ agent_seeds=[0 for _ in range(self.nb_scenario)],
+ max_step=self.max_iter,
+ weight_op_score=0.8,
+ weight_assistant_score=0,
+ weight_nres_score=0.2,
+ scale_nres_score=100,
+ scale_assistant_score=100,
+ min_nres_score=-100.)
+
+ res_agent3 = my_score.get(CurtailTrackerAgent(self.env.action_space,
+ gen_renewable = self.env.gen_renewable,
+ gen_pmax=self.env.gen_pmax,
+ curtail_level = 0.7))
+ # assert np.allclose(res_agent3[0][0][2], -169.9519401162611)
+ # assert np.allclose(res_agent3[0][1][2], -179.45065441917586)
+ assert np.allclose(res_agent3[0][0][2], -100.)
+ assert np.allclose(res_agent3[0][1][2], -100.)
+ finally:
+ my_score.clear_all()
+
+ def test_spec(self):
+ """ spec are: 100pts for 0 curtailment, 0 pts for 80% renewable (20% curtailment) and -100 pts for 50% renewable"""
+
+ # test function without actual data
+ assert _NewRenewableSourcesUsageScore._surlinear_func_curtailment(100.) == 1.
+ assert _NewRenewableSourcesUsageScore._surlinear_func_curtailment(80.) == 0.
+ assert _NewRenewableSourcesUsageScore._surlinear_func_curtailment(50.) == -1.
+ assert _NewRenewableSourcesUsageScore._surlinear_func_curtailment(0.) < _NewRenewableSourcesUsageScore._surlinear_func_curtailment(50.)
+
+ try:
+ # now test with "real" data
+ my_score = ScoreL2RPN2023(
+ self.env,
+ nb_scenario=self.nb_scenario,
+ env_seeds=[0 for _ in range(self.nb_scenario)],
+ agent_seeds=[0 for _ in range(self.nb_scenario)],
+ max_step=self.max_iter,
+ weight_op_score=0.8,
+ weight_assistant_score=0,
+ weight_nres_score=0.2)
+ tol = 3e-5
+ # test do nothing indeed gets 100.
+ res_dn = my_score.get(DoNothingAgent(self.env.action_space))
+ for scen_id, (ep_score, op_score, nres_score, assistant_score) in enumerate(res_dn[0]):
+ assert abs(nres_score - 100.) <= tol
+
+ # test 80% gets indeed close to 0
+ res_80 = my_score.get(CurtailAgent(self.env.action_space, 0.8))
+ for scen_id, (ep_score, op_score, nres_score, assistant_score) in enumerate(res_80[0]):
+ assert abs(nres_score) <= tol
+
+ # test 50% gets indeed close to -100
+ res_50 = my_score.get(CurtailAgent(self.env.action_space, 0.5))
+ for scen_id, (ep_score, op_score, nres_score, assistant_score) in enumerate(res_50[0]):
+ assert abs(nres_score + 100.) <= tol
+
+ # test below 50% still gets close to -100
+ res_30 = my_score.get(CurtailAgent(self.env.action_space, 0.3))
+ for scen_id, (ep_score, op_score, nres_score, assistant_score) in enumerate(res_30[0]):
+ assert abs(nres_score + 100.) <= tol
+ finally:
+ my_score.clear_all()
+
+
+if __name__ == "__main__":
+ unittest.main()
\ No newline at end of file
diff --git a/grid2op/tests/test_score_wcci_2022.py b/grid2op/tests/test_score_wcci_2022.py
index b9d35f647..307c30b4c 100644
--- a/grid2op/tests/test_score_wcci_2022.py
+++ b/grid2op/tests/test_score_wcci_2022.py
@@ -11,8 +11,7 @@
import numpy as np
import grid2op
-from grid2op.Agent.baseAgent import BaseAgent
-from grid2op.Agent.doNothing import DoNothingAgent
+from grid2op.Agent import (BaseAgent, DoNothingAgent)
from grid2op.Reward import L2RPNWCCI2022ScoreFun
from grid2op.utils import ScoreL2RPN2022
diff --git a/grid2op/tests/test_simenv_blackout.py b/grid2op/tests/test_simenv_blackout.py
new file mode 100644
index 000000000..7ec3c1bef
--- /dev/null
+++ b/grid2op/tests/test_simenv_blackout.py
@@ -0,0 +1,65 @@
+# Copyright (c) 2019-2022, RTE (https://www.rte-france.com)
+# See AUTHORS.txt
+# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
+# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
+# you can obtain one at http://mozilla.org/MPL/2.0/.
+# SPDX-License-Identifier: MPL-2.0
+# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
+
+import unittest
+import warnings
+
+import grid2op
+from grid2op.Reward import BaseReward
+
+
+class SimEnvRewardTester(BaseReward):
+ def reset(self, env):
+ self._sim_env = self.is_simulated_env(env)
+ return super().reset(env)
+
+ def initialize(self, env):
+ self._sim_env = self.is_simulated_env(env)
+ return super().initialize(env)
+
+ def __call__(self, action, env, has_error, is_done, is_illegal, is_ambiguous):
+ if self._sim_env:
+ return -1.
+ return 1.
+
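+# NB this reward returns -1. inside simulated / forecast environments and 1. in the
+# "real" environment, so the tests below can tell the two apart from the reward alone.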
+
+class TestIsSimulatedEnv(unittest.TestCase):
+ def setUp(self) -> None:
+ env_name = "l2rpn_case14_sandbox"
+ with warnings.catch_warnings():
+ warnings.filterwarnings("ignore")
+ self.env = grid2op.make(env_name, test=True, reward_class=SimEnvRewardTester)
+ return super().setUp()
+
+ def tearDown(self) -> None:
+ self.env.close()
+ return super().tearDown()
+
+ def test_simulate(self):
+ obs = self.env.reset()
+ obs, reward, done, info = self.env.step(self.env.action_space())
+ assert reward == 1., f"{reward} vs 1."
+ sim_o, sim_r, *_ = obs.simulate(self.env.action_space())
+ assert sim_r == -1., f"{reward} vs -1."
+ sim_o, sim_r, sim_d, sim_i = obs.simulate(self.env.action_space({"set_bus": {"loads_id": [(0, -1)]}}))
+ assert sim_d
+ assert sim_r == -1., f"{reward} vs -1."
+
+ def test_forecast_env(self):
+ obs = self.env.reset()
+ for_env = obs.get_forecast_env()
+ for_d = False
+ i = 0
+ while not for_d:
+ i += 1
+ for_o, for_r, for_d, for_i = for_env.step(self.env.action_space())
+ assert for_r == -1.0, f"{for_r} vs -1. for iter {i}"
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/grid2op/tests/test_simulate_disco_load.py b/grid2op/tests/test_simulate_disco_load.py
new file mode 100644
index 000000000..c0412bc7c
--- /dev/null
+++ b/grid2op/tests/test_simulate_disco_load.py
@@ -0,0 +1,64 @@
+# Copyright (c) 2023, RTE (https://www.rte-france.com)
+# See AUTHORS.txt
+# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
+# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
+# you can obtain one at http://mozilla.org/MPL/2.0/.
+# SPDX-License-Identifier: MPL-2.0
+# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
+
+
+import numpy as np
+import unittest
+import warnings
+
+import grid2op
+from lightsim2grid import LightSimBackend
+
+import pdb
+
+class TestSimulateDiscoLoad(unittest.TestCase):
+ def setUp(self) -> None:
+ """its important to keep the lightims2grid backend here. It tests indirectly that the objects
+ are properly set to "unchanged" without actually having to check the _BackendAction of
+ the obs.simulate underlying backend, which is quite annoying"""
+ with warnings.catch_warnings():
+ warnings.filterwarnings("ignore")
+ # this needs to be tested with pandapower backend
+ # self.env = grid2op.make("l2rpn_case14_sandbox", backend=LightSimBackend(), test=True) # TODO when lightsim will be fixed !
+ self.env = grid2op.make("l2rpn_case14_sandbox", test=True)
+ self.env.seed(0)
+ self.env.set_id(0)
+
+ def tearDown(self) -> None:
+ self.env.close()
+ return super().tearDown()
+
+ def test_simulate_ok(self):
+ obs = self.env.reset()
+ simo, simr, simd, simi = obs.simulate(self.env.action_space())
+ assert not simd
+
+ simo, simr, simd, simi = obs.simulate(self.env.action_space({"set_bus": {"loads_id": [(0, -1)]}}))
+ assert simd
+
+ simo, simr, simd, simi = obs.simulate(self.env.action_space())
+ assert not simd
+
+ def test_backend_action(self):
+ obs = self.env.reset()
+ simo, simr, simd, simi = obs.simulate(self.env.action_space())
+ l_id = 0
+ l_pos = type(self.env).load_pos_topo_vect[l_id]
+ assert obs._obs_env._backend_action_set.current_topo.values[l_pos] == 1
+ assert obs._obs_env._backend_action_set.load_p.changed[l_id]
+ assert np.allclose(obs._obs_env._backend_action_set.load_p.values[l_id], 22.3), f"{obs._obs_env._backend_action_set.load_p.values[l_id]:.2f} vs 22.3"
+
+ obs._obs_env._backend_action_set += self.env.action_space({"set_bus": {"loads_id": [(l_id, -1)]}})
+ assert obs._obs_env._backend_action_set.current_topo.values[l_pos] == -1
+ tmp = obs._obs_env._backend_action_set() # do as if the action has been processed
+ assert not obs._obs_env._backend_action_set.load_p.changed[l_id] # it's disconnected, so marked as unchanged now
+ assert np.allclose(obs._obs_env._backend_action_set.load_p.values[l_id], 22.3), f"{obs._obs_env._backend_action_set.load_p.values[l_id]:.2f} vs 22.3"
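+ # i.e. the stored setpoint is kept but flagged "unchanged": per the 1.9.2 fix,
+ # the disconnection takes priority over any setpoint change in the same action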
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/grid2op/tests/test_ts_handlers.py b/grid2op/tests/test_ts_handlers.py
index 8d0575e3c..e1626457c 100644
--- a/grid2op/tests/test_ts_handlers.py
+++ b/grid2op/tests/test_ts_handlers.py
@@ -15,8 +15,7 @@
import grid2op
from grid2op.Exceptions import NoForecastAvailable
-from grid2op.Chronics import GridStateFromFileWithForecasts, GridStateFromFile, GridStateFromFileWithForecastsWithoutMaintenance
-from grid2op.Chronics.time_series_from_handlers import FromHandlers
+from grid2op.Chronics import GridStateFromFileWithForecasts, GridStateFromFile, GridStateFromFileWithForecastsWithoutMaintenance, FromHandlers
from grid2op.Chronics.handlers import (CSVHandler,
DoNothingHandler,
CSVForecastHandler,
diff --git a/grid2op/utils/icaps_2021_scores.py b/grid2op/utils/icaps_2021_scores.py
index d9b69e1f7..fbe3dd455 100644
--- a/grid2op/utils/icaps_2021_scores.py
+++ b/grid2op/utils/icaps_2021_scores.py
@@ -95,6 +95,7 @@ def __init__(
scale_alarm_score=100.0,
weight_op_score=0.7,
weight_alarm_score=0.3,
+ add_nb_highres_sim=False,
):
ScoreL2RPN2020.__init__(
@@ -112,6 +113,7 @@ def __init__(
"alarm_cost": _AlarmScore,
},
score_names=["grid_operational_cost_scores", "alarm_cost_scores"],
+ add_nb_highres_sim=add_nb_highres_sim,
)
self.scale_alarm_score = scale_alarm_score
self.weight_op_score = weight_op_score
@@ -124,7 +126,7 @@ def _compute_episode_score(
other_rewards,
dn_metadata,
no_ov_metadata,
- score_file_to_use=None,
+ score_file_to_use="grid_operational_cost_scores",
):
"""
Performs the rescaling of the score given the information stored in the "statistics" of this
@@ -144,7 +146,7 @@ def _compute_episode_score(
# score_file_to_use should match the
# L2RPNSandBoxScore key in
# self.scores_func
- score_file_to_use="grid_operational_cost_scores",
+ score_file_to_use=score_file_to_use
)
# should match underlying_statistics.run_env `dict_kwg["other_rewards"][XXX] = ...`
# XXX is right now f"{EpisodeStatistics.KEY_SCORE}_{nm}" [this should match the XXX]
diff --git a/grid2op/utils/l2rpn_2020_scores.py b/grid2op/utils/l2rpn_2020_scores.py
index 2707af710..77d63c5b1 100644
--- a/grid2op/utils/l2rpn_2020_scores.py
+++ b/grid2op/utils/l2rpn_2020_scores.py
@@ -91,6 +91,7 @@ def __init__(
nb_process_stats=1,
scores_func=L2RPNSandBoxScore,
score_names=None,
+ add_nb_highres_sim=False,
):
self.env = env
self.nb_scenario = nb_scenario
@@ -129,6 +130,7 @@ def __init__(
agent=agent_reco,
score_names=score_names,
)
+ self.add_nb_highres_sim = add_nb_highres_sim
self.__cleared = False
def _init_stat(
@@ -242,8 +244,8 @@ def _compute_episode_score(
min_losses_ratio = self.min_losses_ratio
# remember that first observation do not count (it's generated by the environment)
- ep_loads = np.sum(load_p_rp[ids == ep_id, :], axis=1)[1:]
- ep_losses = np.sum(prod_p_rp[ids == ep_id, :], axis=1)[1:] - ep_loads
+ ep_loads = load_p_rp[ids == ep_id, :].sum(axis=1)[1:]
+ ep_losses = prod_p_rp[ids == ep_id, :].sum(axis=1)[1:] - ep_loads
if self.max_step > 0:
scores_dn = scores_dn[: self.max_step]
@@ -253,13 +255,13 @@ def _compute_episode_score(
ep_losses = ep_losses[: self.max_step]
# do nothing operationnal cost
- ep_do_nothing_operat_cost = np.sum(scores_dn)
+ ep_do_nothing_operat_cost = scores_dn.sum()
ep_do_nothing_operat_cost += (
- np.sum(ep_loads[dn_step_played:]) * ep_marginal_cost
+ ep_loads[dn_step_played:].sum() * ep_marginal_cost
)
# no overflow disconnection cost
- ep_do_nothing_nodisc_cost = np.sum(scores_no_ov_rp)
+ ep_do_nothing_nodisc_cost = scores_no_ov_rp.sum()
# this agent cumulated operationnal cost
# same as above: i remove the last element which correspond to the last state, so irrelevant
@@ -268,17 +270,17 @@ def _compute_episode_score(
)
if dn_metadata["max_step"] == self.max_step:
ep_cost = ep_cost[:-1]
- ep_cost = np.sum(ep_cost)
- ep_cost += np.sum(ep_loads[n_played:]) * ep_marginal_cost
+ ep_cost = ep_cost.sum()
+ ep_cost += ep_loads[n_played:].sum() * ep_marginal_cost
# Compute ranges
worst_operat_cost = (
- np.sum(ep_loads) * ep_marginal_cost
+ ep_loads.sum() * ep_marginal_cost
) # operational cost corresponding to the min score
zero_operat_score = ep_do_nothing_operat_cost
nodisc_oeprat_cost = ep_do_nothing_nodisc_cost
best_score = (
- np.sum(ep_losses) * min_losses_ratio
+ ep_losses.sum() * min_losses_ratio
) # operational cost corresponding to the max score
# Linear interp episode reward to codalab score
@@ -357,7 +359,7 @@ def get(self, agent, path_save=None, nb_process=1):
if self.verbose >= 1:
print("Starts the evaluation of the agent") # TODO logger
- EpisodeStatistics.run_env(
+ nb_highres_sim = EpisodeStatistics.run_env(
self.env,
env_seeds=self.env_seeds,
agent_seeds=self.agent_seeds,
@@ -369,7 +371,9 @@ def get(self, agent, path_save=None, nb_process=1):
nb_scenario=self.nb_scenario,
pbar=self.verbose >= 2,
nb_process=nb_process,
+ add_nb_highres_sim=self.add_nb_highres_sim,
)
+ # NB nb_highres_sim is None if self.add_nb_highres_sim is False!
if self.verbose >= 1:
print("Start the evaluation of the scores") # TODO logger
@@ -406,7 +410,10 @@ def get(self, agent, path_save=None, nb_process=1):
if need_delete:
dir_tmp.cleanup()
- return all_scores, ts_survived, total_ts
+ res = all_scores, ts_survived, total_ts
+ if self.add_nb_highres_sim:
+ res = all_scores, ts_survived, total_ts, nb_highres_sim
+ return res
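+ # callers thus unpack either
+ # all_scores, ts_survived, total_ts = score.get(agent)
+ # or, when the score object was built with `add_nb_highres_sim=True`,
+ # all_scores, ts_survived, total_ts, nb_highres_sim = score.get(agent)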
if __name__ == "__main__":
diff --git a/grid2op/utils/l2rpn_idf_2023_scores.py b/grid2op/utils/l2rpn_idf_2023_scores.py
index 8a4502634..307cf3881 100644
--- a/grid2op/utils/l2rpn_idf_2023_scores.py
+++ b/grid2op/utils/l2rpn_idf_2023_scores.py
@@ -6,9 +6,11 @@
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
+import numpy as np
from grid2op.utils.l2rpn_2020_scores import ScoreL2RPN2020
-from grid2op.Reward import L2RPNSandBoxScore, _NewRenewableSourcesUsageScore, _AssistantConfidenceScore, _AssistantCostScore
+from grid2op.Reward import L2RPNSandBoxScore, _NewRenewableSourcesUsageScore, _AlertTrustScore
from grid2op.utils.underlying_statistics import EpisodeStatistics
+from grid2op.Exceptions import Grid2OpException
class ScoreL2RPN2023(ScoreL2RPN2020):
@@ -72,24 +74,32 @@ class ScoreL2RPN2023(ScoreL2RPN2020):
"""
def __init__(
- self,
- env,
- env_seeds=None,
- agent_seeds=None,
- nb_scenario=16,
- min_losses_ratio=0.8,
- verbose=0,
- max_step=-1,
- nb_process_stats=1,
- scale_assistant_score=100.0,
- scale_nres_score=100.,
- weight_op_score=0.6,
- weight_assistant_score=0.25,
- weight_nres_score=0.15,
- weight_confidence_assistant_score=0.7,
- min_nres_score=-100,
+ self,
+ env,
+ env_seeds=None,
+ agent_seeds=None,
+ nb_scenario=16,
+ min_losses_ratio=0.8,
+ verbose=0,
+ max_step=-1,
+ nb_process_stats=1,
+ scores_func={
+ "grid_operational_cost": L2RPNSandBoxScore,
+ "assistant_confidence": _AlertTrustScore,
+ "new_renewable_sources_usage": _NewRenewableSourcesUsageScore,
+ },
+ score_names=["grid_operational_cost_scores",
+ "assistant_confidence_scores",
+ "new_renewable_sources_usage_scores"],
+ add_nb_highres_sim=False,
+ scale_assistant_score=100.0,
+ scale_nres_score=100.,
+ weight_op_score=0.6,
+ weight_assistant_score=0.25,
+ weight_nres_score=0.15,
+ min_nres_score=-100,
+ min_assistant_score=-300
):
-
ScoreL2RPN2020.__init__(
self,
env=env,
@@ -100,37 +110,37 @@ def __init__(
verbose=verbose,
max_step=max_step,
nb_process_stats=nb_process_stats,
- scores_func={
- "grid_operational_cost": L2RPNSandBoxScore,
- #"assistance_confidence": _AssistantConfidenceScore,
- #"assistant_cost": _AssistantCostScore,
- "new_renewable_sources_usage": _NewRenewableSourcesUsageScore,
- },
- score_names=["grid_operational_cost_scores",
- #"assistant_confidence_scores",
- #"assistant_cost_scores",
- "new_renewable_sources_usage_scores"],
+ scores_func=scores_func,
+ score_names=score_names,
+ add_nb_highres_sim=add_nb_highres_sim,
)
-
- assert(weight_op_score + weight_assistant_score + weight_nres_score==1.)
- assert(all([weight_confidence_assistant_score>=0., weight_confidence_assistant_score<=1.]))
-
+ weights = np.array([weight_op_score, weight_assistant_score, weight_nres_score])
+ total_weights = weights.sum()
+ if total_weights != 1.0:
+ raise Grid2OpException(
+ 'The weights of each component of the score shall sum to 1'
+ )
+ if np.any(weights < 0):
+ raise Grid2OpException(
+ 'All weights should be positive'
+ )
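+ # NB the sum check above uses exact float equality; a tolerant check such as
+ # `np.isclose(total_weights, 1.0)` might be preferable for weights computed
+ # dynamically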
+
self.scale_assistant_score = scale_assistant_score
self.scale_nres_score = scale_nres_score
self.weight_op_score = weight_op_score
self.weight_assistant_score = weight_assistant_score
self.weight_nres_score = weight_nres_score
- self.weight_confidence_assistant_score = weight_confidence_assistant_score
self.min_nres_score = min_nres_score
+ self.min_assistant_score = min_assistant_score
def _compute_episode_score(
- self,
- ep_id, # the ID here, which is an integer and is not the ID from chronics balblabla
- meta,
- other_rewards,
- dn_metadata,
- no_ov_metadata,
- score_file_to_use="grid_operational_cost_scores",
+ self,
+ ep_id, # the ID here, which is an integer and is not the ID from chronics balblabla
+ meta,
+ other_rewards,
+ dn_metadata,
+ no_ov_metadata,
+ score_file_to_use="grid_operational_cost_scores",
):
"""
Performs the rescaling of the score given the information stored in the "statistics" of this
@@ -154,33 +164,24 @@ def _compute_episode_score(
)
# should match underlying_statistics.run_env `dict_kwg["other_rewards"][XXX] = ...`
# XXX is right now f"{EpisodeStatistics.KEY_SCORE}_{nm}" [this should match the XXX]
-
- #retrieve nres_score
+
+ # retrieve nres_score
new_renewable_sources_usage_score_nm = "new_renewable_sources_usage_scores"
real_nm = EpisodeStatistics._nm_score_from_attr_name(new_renewable_sources_usage_score_nm)
key_score_file = f"{EpisodeStatistics.KEY_SCORE}_{real_nm}"
nres_score = float(other_rewards[-1][key_score_file])
nres_score = max(nres_score, self.min_nres_score / self.scale_nres_score)
- nres_score = self.scale_nres_score * nres_score
-
- #assistant_confidence_score
- # new_renewable_sources_usage_score_nm = "assistant_confidence_scores"
- # real_nm = EpisodeStatistics._nm_score_from_attr_name(new_renewable_sources_usage_score_nm)
- # key_score_file = f"{EpisodeStatistics.KEY_SCORE}_{real_nm}"
- # assistant_confidence_score = float(other_rewards[-1][key_score_file])
- assistant_confidence_score = 0 #self.scale_assistant_score * assistant_confidence_score
-
- #assistant_cost_score
- # new_renewable_sources_usage_score_nm = "assistant_cost_scores"
- # real_nm = EpisodeStatistics._nm_score_from_attr_name(new_renewable_sources_usage_score_nm)
- # key_score_file = f"{EpisodeStatistics.KEY_SCORE}_{real_nm}"
- # assistant_cost_score = float(other_rewards[-1][key_score_file])
- assistant_cost_score = 0 #self.scale_assistant_score * assistant_cost_score
-
- assistant_score = self.weight_confidence_assistant_score * assistant_confidence_score +\
- (1. - self.weight_confidence_assistant_score) * assistant_cost_score
+ nres_score = self.scale_nres_score * nres_score
+
+ # assistant_score
+ assistant_confidence_score_nm = "assistant_confidence_scores"
+ real_nm = EpisodeStatistics._nm_score_from_attr_name(assistant_confidence_score_nm)
+ key_score_file = f"{EpisodeStatistics.KEY_SCORE}_{real_nm}"
+ assistant_confidence_score = float(other_rewards[-1][key_score_file])
+ assistant_confidence_score = max(assistant_confidence_score, self.min_assistant_score / self.scale_assistant_score)
+ assistant_score = self.scale_assistant_score * assistant_confidence_score
ep_score = (
- self.weight_op_score * op_score + self.weight_nres_score * nres_score + self.weight_assistant_score * assistant_score
+ self.weight_op_score * op_score + self.weight_nres_score * nres_score + self.weight_assistant_score * assistant_score
)
- return (ep_score, op_score, nres_score, assistant_confidence_score, assistant_cost_score), n_played, total_ts
+ return (ep_score, op_score, nres_score, assistant_score), n_played, total_ts
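+ # NB with the default weights (0.6 op, 0.25 assistant, 0.15 nres), an agent with
+ # op_score=50, nres_score=100 and assistant_score=100 (illustrative values) gets
+ # ep_score = 0.6 * 50 + 0.15 * 100 + 0.25 * 100 = 70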
diff --git a/grid2op/utils/l2rpn_wcci_2022_scores.py b/grid2op/utils/l2rpn_wcci_2022_scores.py
index dac710d92..7b0611e65 100644
--- a/grid2op/utils/l2rpn_wcci_2022_scores.py
+++ b/grid2op/utils/l2rpn_wcci_2022_scores.py
@@ -23,5 +23,16 @@ def __init__(self,
verbose=0, max_step=-1,
nb_process_stats=1,
scores_func=L2RPNWCCI2022ScoreFun,
- score_names=None):
- super().__init__(env, env_seeds, agent_seeds, nb_scenario, min_losses_ratio, verbose, max_step, nb_process_stats, scores_func, score_names)
+ score_names=None,
+ add_nb_highres_sim=False):
+ super().__init__(env,
+ env_seeds,
+ agent_seeds,
+ nb_scenario,
+ min_losses_ratio,
+ verbose,
+ max_step,
+ nb_process_stats,
+ scores_func,
+ score_names,
+ add_nb_highres_sim=add_nb_highres_sim)
diff --git a/grid2op/utils/underlying_statistics.py b/grid2op/utils/underlying_statistics.py
index 5916b45d7..a083facec 100644
--- a/grid2op/utils/underlying_statistics.py
+++ b/grid2op/utils/underlying_statistics.py
@@ -104,7 +104,7 @@ class EpisodeStatistics(object):
SCENARIO_IDS = "scenario_ids.npz"
SCORES = "scores.npz"
SCORES_CLEAN = re.sub("\\.npz", "", SCORES)
- KEY_SCORE = "__scores"
+ KEY_SCORE = "_scores"
SCORE_FOOTPRINT = ".has_score"
STATISTICS_FOLDER = "_statistics"
STATISTICS_FOOTPRINT = ".statistics"
@@ -538,6 +538,7 @@ def run_env(
agent_seeds,
pbar,
nb_process,
+ add_nb_highres_sim=False,
):
if scores_func is not None:
@@ -567,7 +568,7 @@ def run_env(
"be a dictionary"
)
runner = Runner(**dict_kwg, agentClass=None, agentInstance=agent)
- runner.run(
+ res_runner = runner.run(
path_save=path_save,
nb_episode=nb_scenario,
max_iter=max_step,
@@ -575,7 +576,13 @@ def run_env(
agent_seeds=agent_seeds,
pbar=pbar,
nb_process=nb_process,
+ add_detailed_output=False, # check the return value if you change this
+ add_nb_highres_sim=add_nb_highres_sim
)
+ if add_nb_highres_sim:
+ res = [el[-1] for el in res_runner]
+ return res
+ return None
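+ # NB when `add_nb_highres_sim` is set, `run_env` returns one count of high
+ # resolution simulations per scenario (the last element of each runner result),
+ # and None otherwise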
def get_metadata(self):
"""return the metadata as a dictionary"""