diff --git a/.circleci/config.yml b/.circleci/config.yml index d5ebb5d59..7f8a504f7 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -140,7 +140,7 @@ jobs: - run: command: | source venv_test/bin/activate - python -m pip install -U "numpy>=1.20,<1.21" "pandas<2.2" "scipy<1.12" numba .[test] + python -m pip install -U "numpy>=1.20,<1.21" "pandas<2.2" "scipy<1.12" numba "pillow<10.4.0" .[test] pip freeze - run: command: | @@ -151,7 +151,7 @@ jobs: - run: command: | source venv_test/bin/activate - python -m pip install -U "numpy>=1.24,<1.25" "pandas<2.2" "scipy<1.12" numba .[test] + python -m pip install -U "numpy>=1.24,<1.25" "pandas<2.2" "scipy<1.12" numba "pillow<10.4.0" .[test] - run: command: | source venv_test/bin/activate @@ -163,7 +163,7 @@ jobs: cd /tmp grid2op.testinstall - legacy_lightsim: + legacy_lightsim_old_pp: executor: python38 # needs to be 38: whl of lightsim were not released for 3.10 at the time resource_class: small steps: @@ -190,6 +190,59 @@ jobs: export _GRID2OP_FORCE_TEST=1 python -m unittest grid2op/tests/test_basic_env_ls.py + legacy_lightsim: + executor: python38 # needs to be 38: whl of lightsim were not released for 3.10 at the time + resource_class: small + steps: + - checkout + - run: + command: | + apt-get update + apt-get install -y coinor-cbc + - run: python -m pip install virtualenv + - run: python -m virtualenv venv_test + - run: + command: | + source venv_test/bin/activate + python -m pip install -U pip setuptools wheel + python -m pip install -U lightsim2grid==0.6.0 gymnasium "numpy<1.22" + - run: + command: | + source venv_test/bin/activate + python -m pip install -e . 
+ pip freeze + - run: + command: | + source venv_test/bin/activate + export _GRID2OP_FORCE_TEST=1 + python -m unittest grid2op/tests/test_basic_env_ls.py + + test_chronix2grid: + executor: python310 # needs to be 38: whl of lightsim were not released for 3.10 at the time + resource_class: small + steps: + - checkout + - run: + command: | + apt-get update + apt-get install -y coinor-cbc + - run: python -m pip install virtualenv + - run: python -m virtualenv venv_test + - run: + command: | + source venv_test/bin/activate + python -m pip install -U pip setuptools wheel "numpy==1.26.4" + - run: + command: | + source venv_test/bin/activate + python -m pip install -e .[chronix2grid] "linopy==0.3.8" "scs==3.2.4.post1" "ecos==2.0.13" "pillow==10.3.0" "numpy==1.26.4" "xarray==2024.3.0" + pip freeze + - run: + command: | + source venv_test/bin/activate + export _GRID2OP_FORCE_TEST=1 + python -m unittest grid2op/tests/fromChronix2grid.py + install39: executor: python39 resource_class: small @@ -205,8 +258,7 @@ jobs: command: | export _GRID2OP_FORCE_TEST=1 source venv_test/bin/activate - python -m pip install -U pip setuptools wheel "numpy>=1.20,<1.21" "pandas<2.2" "scipy==1.10.1" numba - python -m pip install "chronix2grid>=1.1.0.post1" "gymnasium==0.26.3" "matplotlib==3.7.5" "xarray==2023.10.0" "scs==3.0.0" "ecos==2.0.0" + python -m pip install -U pip setuptools wheel "numpy>=1.20,<1.21" "pandas<2.2" "scipy==1.10.1" "pillow<10.4.0" numba python -m pip uninstall -y grid2op - run: command: | # issue with previous more simple install, so I fix some versions @@ -222,7 +274,7 @@ jobs: - run: command: | source venv_test/bin/activate - python -m pip install "numpy>=1.26,<1.27" "pandas<2.2" "scipy<1.12" numba + python -m pip install "numpy>=1.26,<1.27" "pandas<2.2" "scipy<1.12" numba "pillow<10.4.0" pip freeze - run: command: | @@ -340,7 +392,10 @@ workflows: test: jobs: - test + - legacy_lightsim_old_pp - legacy_lightsim + - test_chronix2grid + install: jobs: - install38 diff --git 
a/.github/workflows/main.yml b/.github/workflows/main.yml index a41c23e11..1e8054ad0 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -12,6 +12,8 @@ jobs: name: Build linux ${{ matrix.python.name }} wheel runs-on: ubuntu-latest container: quay.io/pypa/manylinux2014_x86_64 + env: + ACTIONS_ALLOW_USE_UNSECURE_NODE_VERSION: true strategy: matrix: python: @@ -59,7 +61,7 @@ jobs: - name: Build wheel run: | - python3 setup.py bdist_wheel + python setup.py bdist_wheel # auditwheel repair dist/*.whl # only for compiled code ! - name: Install wheel @@ -69,12 +71,16 @@ jobs: - name: Check package can be imported run: | - python3 -c "import grid2op" - python3 -c "from grid2op import *" - python3 -c "from grid2op.Action._backendAction import _BackendAction" - + python -c "import grid2op" + python -c "from grid2op import *" + python -c "from grid2op.Action._backendAction import _BackendAction" + + - name: List wheel + run: + ls ./dist/*.whl + - name: Upload wheel - uses: actions/upload-artifact@v2 + uses: actions/upload-artifact@v3 with: name: grid2op-wheel-${{ matrix.config.name }}-${{ matrix.python.name }} path: dist/*.whl @@ -165,10 +171,70 @@ jobs: name: grid2op-sources path: dist/*.tar.gz + auto_class_in_file: + name: Test ${{ matrix.config.name }} OS can handle automatic class generation + runs-on: ${{ matrix.config.os }} + strategy: + matrix: + config: + - { + name: darwin, + os: macos-latest, + } + # - { + # name: windows, + # os: windows-2019, + # } + - { + name: ubuntu, + os: ubuntu-latest, + } + python: + - { + name: cp39, + version: '3.9', + } + - { + name: cp312, + version: '3.12', + } + + steps: + + - name: Checkout sources + uses: actions/checkout@v1 + with: + submodules: true + + - name: Setup Python + uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python.version }} + + - name: Install Python dependencies + run: | + python -m pip install --upgrade pip + python -m pip install --upgrade wheel + python -m pip install 
--upgrade setuptools + python -m pip install --upgrade gymnasium "numpy<2" + + - name: Build wheel + run: python setup.py bdist_wheel + + - name: Install wheel + shell: bash + run: | + python -m pip install dist/*.whl --user + pip freeze + + - name: Test the automatic generation of classes in the env folder + run: | + python -m unittest grid2op/tests/automatic_classes.py -f + package: name: Test install runs-on: ubuntu-latest - needs: [manylinux_build, macos_windows_build] + needs: [manylinux_build, macos_windows_build, auto_class_in_file] steps: - name: Download wheels diff --git a/.gitignore b/.gitignore index ba9e6e67b..6bd200b60 100644 --- a/.gitignore +++ b/.gitignore @@ -410,6 +410,12 @@ grid2op/tests/req_38_np121 test_make_2_envs.py getting_started/env_py38_grid2op110_ray110.ipynb getting_started/env_py38_grid2op110_ray210.ipynb +grid2op/tests/req_chronix2grid +grid2op/tests/venv_test_chronix2grid/ +getting_started/venv_310_ray/ +grid2op/tests/venv_test_autoclass/ +test_eduardo.py +grid2op/tests/failed_test* # profiling files **.prof diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 6d4271a18..3acf1eaa0 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -31,9 +31,113 @@ Change Log - [???] "asynch" multienv - [???] properly model interconnecting powerlines +Work kind of in progress +---------------------------------- +- TODO A number of max buses per sub +- TODO in the runner, save multiple times the same sceanrio +- TODO in the gym env, make the action_space and observation_space attribute + filled automatically (see ray integration, it's boring to have to copy paste...) + +Next release +--------------------------------- +- numpy 2 compat (need pandapower for that) +- automatic read from local dir also on windows ! 
+- TODO doc for the "new" feature of automatic "experimental_read_from_local_dir" +- TODO bug on maintenance starting at midnight (they are not correctly handled in the observation) + => cf script test_issue_616 +- TODO put the Grid2opEnvWrapper directly in grid2op as GymEnv +- TODO faster gym_compat (especially for DiscreteActSpace and BoxGymObsSpace) +- TODO Notebook for tf_agents +- TODO Notebook for acme +- TODO Notebook using "keras rl" (see https://keras.io/examples/rl/ppo_cartpole/) +- TODO example for MCTS https://github.com/bwfbowen/muax et https://github.com/google-deepmind/mctx +- TODO jax everything that can be: create a simple env based on jax for topology manipulation, without + redispatching or rules +- TODO backend in jax, maybe ? +- TODO done and truncated properly handled in gym_compat module (when game over + before the end it's probably truncated and not done) +- TODO when reset, have an attribute "reset_infos" with some infos about the + way reset was called. +- TODO ForecastEnv in MaskedEnv ! (and obs.simulate there too !) +- TODO finish the test in automatic_classes +- TODO in multi-mix increase the reset options with the mix the user wants +- TODO L2RPN scores as reward (sum loads after the game over and have it in the final reward) +- TODO on CI: test only gym, only gymnasium and keep current test for both gym and gymnasium + +[1.10.3] - 2024-07-yy +------------------------- +- TODO Automatic "experimental_read_from_local_dir" + +- [BREAKING] `env.chronics_hander.set_max_iter(xxx)` is now a private function. Use + `env.set_max_iter(xxx)` or even better `env.reset(options={"max step": xxx})`. + Indeed, `env.chronics_hander.set_max_iter()` will likely have + no effect at all on your environment. +- [BREAKING] for all the `Handler` (*eg* `CSVForecastHandler`) the method `set_max_iter` is + now private (for the same reason as the `env.chronics_handler`). We do not recommend to + use it (will likely have no effect). 
Prefer using `env.set_max_iter` instead. +- [BREAKING] now the `runner.run()` method only accepts kwargs arguments + (because it should always have been like this) +- [BREAKING] to improve pickle support and multi processing capabilities, the attribute + `gym_env.observation_space._init_env` and `gym_env.observation_space.initial_obs_space` + have been deleted (for the `Dict` space only, for the other spaces like the `Box` they + were not present in the first place) +- [BREAKING] in the `GymEnv` class now by default the underlying grid2op environment has no + forecast anymore in an attempt to make this wrapper faster AND more easily pickle-able. You can + retrieve the old behaviour by passing `gym_env = GymEnv(grid2op_env, with_forecast=True)` +- [FIXED] a bug in the `MultiFolder` and `MultifolderWithCache` leading to the wrong + computation of `max_iter` on some corner cases +- [FIXED] the function `cleanup_action_space()` did not work correctly when the "chronics_handler" + was not initialized for some classes +- [FIXED] the `_observationClass` attribute of the "observation env" (used for simulate and forecasted env) + is now an Observation and not an Action. 
+- [FIXED] a bug when deep copying an "observation environment" (it changes its class) +- [FIXED] issue on `seed` and `MultifolderWithCache` which caused + https://github.com/rte-france/Grid2Op/issues/616 +- [FIXED] another issue with the seeding of `MultifolderWithCache`: the seed was not used + correctly on the cache data when calling `chronics_handler.reset` multiple times without + any changes +- [FIXED] `Backend` now properly raises EnvError (grid2op exception) instead of previously + `EnvironmentError` (python default exception) +- [FIXED] a bug in `PandaPowerBackend` (missing attribute) causing directly + https://github.com/rte-france/Grid2Op/issues/617 +- [FIXED] a bug in `Environment`: the thermal limits were used when loading the environment + even before the "time series" are applied (and before the user defined thermal limits were set) + which could lead to disconnected powerlines even before the initial step (t=0, when time + series are loaded) +- [FIXED] an issue with the "max_iter" for `FromNPY` time series generator +- [FIXED] a bug in `MultiMixEnvironment` : a multi-mix could be created even if the underlying + powergrids (for each mix) were not the same. +- [FIXED] a bug in `generate_classes` (experimental_read_from_local_dir) with alert data. 
+- [FIXED] a bug in the `Runner` when using multi processing on macos and windows OS: some non default + parameters were not propagated in the "child" process (bug in `runner._ger_params`) +- [ADDED] possibility to skip some steps when calling `env.reset(..., options={"init ts": ...})` +- [ADDED] possibility to limit the duration of an episode with `env.reset(..., options={"max step": ...})` +- [ADDED] possibility to specify the "reset_options" used in `env.reset` when + using the runner with `runner.run(..., reset_options=xxx)` +- [ADDED] the argument `mp_context` when building the runner to help pass a multiprocessing context in the + grid2op `Runner` +- [ADDED] the time series are now able to regenerate their "random" part + even when "cached" thanks to the addition of the `regenerate_with_new_seed` of the + `GridValue` class (in public API) +- [ADDED] `MultifolderWithCache` now supports `FromHandlers` time series generator +- [IMPROVED] more consistency in the way the classes are initialized at the creation of an environment +- [IMPROVED] more consistency when an environment is copied (some attributes of the copied env were + deep copied incorrectly) +- [IMPROVED] Doc about the runner +- [IMPROVED] the documentation on the `time series` folder. 
+- [IMPROVED] now the "maintenance from json" (*eg* the `JSONMaintenanceHandler` or the + `GridStateFromFileWithForecastsWithMaintenance`) can be customized with the day + of the week where the maintenance happens (key `maintenance_day_of_week`) +- [IMPROVED] in case of "`MultiMixEnvironment`" there is now only one class generated for + all the underlying mixes (instead of having one class per mix) +- [IMPROVED] the `EpisodeData` have now explicitly a mode where they can be shared across + processes (using `fork` at least), see `ep_data.make_serializable` +- [IMPROVED] chronix2grid tests are now done independently on the CI + + [1.10.2] - 2024-05-27 ------------------------- -- [BREAKING] the `runner.run_one_episode` now returns an extra first argument: +- [BREAKING] the `runner.run_one_episode` now returns an extra argument (first position): `chron_id, chron_name, cum_reward, timestep, max_ts = runner.run_one_episode()` which is consistant with `runner.run(...)` (previously it returned only `chron_name, cum_reward, timestep, max_ts = runner.run_one_episode()`) @@ -822,7 +926,7 @@ Change Log `Issue#185 `_ ) - [IMPROVED] the seed of openAI gym for composed action space (see issue `https://github.com/openai/gym/issues/2166`): in waiting for an official fix, grid2op will use the solution proposed there - https://github.com/openai/gym/issues/2166#issuecomment-803984619 ) + https://github.com/openai/gym/issues/2166#issuecomment-803984619 [1.5.1] - 2021-04-15 ----------------------- diff --git a/docs/chronics.rst b/docs/chronics.rst index 8a13f5674..1557ab07f 100644 --- a/docs/chronics.rst +++ b/docs/chronics.rst @@ -54,26 +54,158 @@ come from the :class:`grid2op.GridValue` and are detailed in the :func:`GridValue.forecasts` method. 
-More control on the chronics +More control on the time series ------------------------------- We explained, in the description of the :class:`grid2op.Environment` in sections :ref:`environment-module-chronics-info` and following how to have more control on which chronics is used, with steps are used within a chronics etc. We will not detailed here again, please refer to this page for more information. -However, know that you can have a very detailed control on which chronics are used: +However, know that you can have a very detailed control on which time series are used thanks to the `options` +kwargs of a call to `env.reset()` (or the `reset_options` kwargs when calling the +`runner.run()`) : -- use `env.set_id(THE_CHRONIC_ID)` (see :func:`grid2op.Environment.Environment.set_id`) to set the id of the - chronics you want to use -- use `env.chronics_handler.set_filter(a_function)` (see :func:`grid2op.Chronics.GridValue.set_filter`) + +Use a specific time series for an episode +******************************************* + +To use a specific time series for a given episode, you can use +`env.reset(options={"time serie id": THE_ID_YOU_WANT})`. + +For example: + +.. code-block:: python + + import grid2op + env_name = "l2rpn_case14_sandbox" + env = grid2op.make(env_name) + + # you can use an int: + obs = env.reset(options={"time serie id": 0}) + + # or the name of the folder (for most grid2op environment) + obs = env.reset(options={"time serie id": "0000"}) # for l2rpn_case14_sandbox + + # for say l2rpn_neurips_2020_track1 + # obs = env.reset(options={"time serie id": "Scenario_august_008"}) + + # for say l2rpn_idf_2023 + # obs = env.reset(options={"time serie id": "2035-04-23_7"}) + + +.. note:: + For oldest grid2op versions (please upgrade if that's the case) you needed to use: + `env.set_id(THE_CHRONIC_ID)` (see :func:`grid2op.Environment.Environment.set_id`) to set the id of the + chronics you want to use. 
+ + +Skipping the initial few steps +******************************* + +Often the time series provided for an environment always start at the same date and time on +the same hour of the day and day of the week. It might not be ideal to learn a controller +with such data or might "burn up" computation time during evaluation. + +To do that, you can use the `"init ts"` reset options, for example with: + +.. code-block:: python + + import grid2op + env_name = "l2rpn_case14_sandbox" + env = grid2op.make(env_name) + + # you can use an int: + obs = env.reset(options={"init ts": 12}) + + # obs will skip the first hour of the time series + # 12 steps is equivalent to 1h (5 mins per step in general) + + +.. note:: + + For oldest grid2op versions (please upgrade if that's the case) you needed to use: + `env.fast_forward_chronics(nb_time_steps)` + (see :func:`grid2op.Environment.BaseEnv.fast_forward_chronics`) to skip initial + few steps + of a given chronics. + + Please be aware that this "legacy" behaviour has some issues and is "less clear" + than the "init ts" above and it can have some weird combination with + `set_max_iter` for example. + + +Limit the maximum length of the current episode +************************************************* + +For most environments, the maximum duration of an episode is the equivalent of a week +(~2020 steps) or a month (~8100 steps) which might be too long for some use cases. + +Anyway, if you want to reduce it, you can now do it with the `"max step"` reset +option like this: + +.. code-block:: python + + import grid2op + env_name = "l2rpn_case14_sandbox" + env = grid2op.make(env_name) + + # you can use an int: + obs = env.reset(options={"max step": 2*288}) + + # the maximum duration of the episode is now 2*288 steps + # the equivalent of two days + +.. 
note:: + + For oldest grid2op versions (please upgrade if that's the case) you needed to use: + `env.chronics_handler.set_max_iter(nb_max_iter)` + (see :func:`grid2op.Chronics.ChronicsHandler.set_max_iter`) to limit the number + of steps within an episode. + + Please be aware that this "legacy" behaviour has some issues and is "less clear" + than the "init ts" above and it can have some weird combination with + `fast_forward_chronics` for example. + +Discard some time series from the existing folder +************************************************** + +The folder containing the time series for a given grid2op environment often contains +dozens (thousands sometimes) different time series. + +You might want to use only part of them at some point (whether it's some for training and some +for validation and test, or some for training an agent on a process and some to train the +same agent on another process etc.) + +Anyway, if you want to do this (on the majority of released environments) you can do it +thanks to the `env.chronics_handler.set_filter(a_function)`. + +For example: + +.. code-block:: python + + import re + import grid2op + env_name = "l2rpn_case14_sandbox" + env = grid2op.make(env_name) + + def keep_only_some_ep(chron_name): + return re.match(r".*00.*", chron_name) is not None + + env.chronics_handler.set_filter(keep_only_some_ep) + li_episode_kept = env.chronics_handler.reset() + + +.. 
note:: + For oldest grid2op versions (please upgrade if that's the case) you needed to use: + use `env.chronics_handler.set_filter(a_function)` (see :func:`grid2op.Chronics.GridValue.set_filter`) to only use certain chronics + + - use `env.chronics_handler.sample_next_chronics(probas)` (see :func:`grid2op.Chronics.GridValue.sample_next_chronics`) to draw at random some chronics -- use `env.fast_forward_chronics(nb_time_steps)` - (see :func:`grid2op.Environment.BaseEnv.fast_forward_chronics`) to skip initial number of steps - of a given chronics -- use `env.chronics_handler.set_max_iter(nb_max_iter)` - (see :func:`grid2op.Chronics.ChronicsHandler.set_max_iter`) to limit the number of steps within an episode + +Performance gain (throughput) +******************************** Chosing the right chronics can also lead to some large advantage in terms of computation time. This is particularly true if you want to benefit the most from HPC for example. More detailed is given in the diff --git a/docs/conf.py b/docs/conf.py index 88311b9e7..8d3d22dd7 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -22,7 +22,7 @@ author = 'Benjamin Donnot' # The full version, including alpha/beta/rc tags -release = '1.10.2' +release = '1.10.3.dev1' version = '1.10' diff --git a/docs/grid2op.rst b/docs/grid2op.rst index 1e115f329..02fc4826d 100644 --- a/docs/grid2op.rst +++ b/docs/grid2op.rst @@ -447,6 +447,7 @@ alert (when the attack is happening) Disclaimer ----------- + Grid2op is a research testbed platform, it has not been tested in "production" context Going further @@ -458,3 +459,5 @@ more information and a detailed tour about the issue that grid2op tries to addre .. note:: As of writing (december 2020) most of these notebooks focus on the "agent" part of grid2op. We would welcome any contribution to better explain the other aspect of this platform. + +.. 
include:: final.rst diff --git a/docs/gym.rst b/docs/gym.rst index 06fe365f7..02e47d796 100644 --- a/docs/gym.rst +++ b/docs/gym.rst @@ -504,37 +504,7 @@ This is because grid2op will (to save computation time) generate some classes (t fly, once the environment is loaded. And unfortunately, pickle module is not always able to process these (meta) data. -Try to first create (automatically!) the files containing the description of the classes -used by your environment (for example): - -.. code-block:: python - - from grid2op import make - from grid2op.Reward import RedispReward - from lightsim2grid import LightSimBackend - - env_name = 'l2rpn_wcci_2022' - backend_class = LightSimBackend - env = make(env_name, reward_class=RedispReward, backend=backend_class()) - env.generate_classes() - -.. note:: - This piece of code is to do once (each time you change the backend or the env name) - -And then proceed as usual by loading the grid2op environment -with the key-word `experimental_read_from_local_dir` - -.. code-block:: python - - from grid2op import make - from grid2op.Reward import RedispReward - from lightsim2grid import LightSimBackend - - env_name = 'l2rpn_wcci_2022' - backend_class = LightSimBackend - env = make(env_name, reward_class=RedispReward, backend=backend_class(), - experimental_read_from_local_dir=True) - # do whatever +You can solve this issue by look at :ref:`troubleshoot_pickle` section of the documentation. Observation XXX outside given space YYY **************************************** @@ -560,4 +530,4 @@ Detailed Documentation by class :members: :autosummary: -.. include:: final.rst \ No newline at end of file +.. 
include:: final.rst diff --git a/docs/index.rst b/docs/index.rst index 31dd1f648..42179d3b4 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -75,6 +75,7 @@ Environments create_an_environment dive_into_time_series data_pipeline + troubleshoot Usage examples --------------------- diff --git a/docs/model_based.rst b/docs/model_based.rst index 3645fa6e9..5bd373985 100644 --- a/docs/model_based.rst +++ b/docs/model_based.rst @@ -378,3 +378,5 @@ And for the `ExampleAgent2`: res = strat[0] # action is the first one of the best strategy highest_score = ts_survived return res + +.. include:: final.rst diff --git a/docs/model_free.rst b/docs/model_free.rst index db326736f..94f8f7458 100644 --- a/docs/model_free.rst +++ b/docs/model_free.rst @@ -17,3 +17,5 @@ Some examples are given in "l2rpn-baselines": - `PPO with RLLIB `_ - `PPO with stable-baselines3 `_ + +.. include:: final.rst diff --git a/docs/observation.rst b/docs/observation.rst index 97a881108..05bb35a75 100644 --- a/docs/observation.rst +++ b/docs/observation.rst @@ -133,4 +133,4 @@ Detailed Documentation by class :special-members: :autosummary: -.. include:: final.rst \ No newline at end of file +.. include:: final.rst diff --git a/docs/optimization.rst b/docs/optimization.rst index 24a58e304..ba9407a8e 100644 --- a/docs/optimization.rst +++ b/docs/optimization.rst @@ -19,3 +19,5 @@ Basically an "optimizer" agent looks like (from a very high level): 3) update the "formulation" using the observation received 4) run a solver to solve the "problem" 5) convert back the "decisions" (output) of the solver into a "grid2op" action + +.. include:: final.rst diff --git a/docs/quickstart.rst b/docs/quickstart.rst index 3a641da06..3955b8182 100644 --- a/docs/quickstart.rst +++ b/docs/quickstart.rst @@ -121,3 +121,5 @@ The most basic code, for those familiar with openAI gym (a well-known framework To make the use of grid2op alongside grid2op environment easier, we developed a module described in :ref:`openai-gym`. + +.. 
include:: final.rst diff --git a/docs/runner.rst b/docs/runner.rst index 266c26c2d..2752971cc 100644 --- a/docs/runner.rst +++ b/docs/runner.rst @@ -125,6 +125,189 @@ For information, as of writing (march 2021): - macOS with python <= 3.7 will behave like any python version on linux - windows and macOS with python >=3.8 will behave differently than linux but similarly to one another +Some common runner options: +------------------------------- + +Specify an agent instance and not a class +******************************************* + +By default, if you specify an agent class (*eg* `AgentCLS`), then the runner will initialize it with: + +.. code-block:: python + + agent = AgentCLS(env.action_space) + +But you might want to use agent initialized in a more complex way. To that end, you can customize the +agent instance you want to use (and not only its class) with the following code: + +.. code-block:: python + + import grid2op + from grid2op.Agent import RandomAgent # for example... + from grid2op.Runner import Runner + + env = grid2op.make("l2rpn_case14_sandbox") + + agent_instance = RandomAgent(env.action_space) + runner = Runner(**env.get_params_for_runner(), agentClass=None, agentInstance=agent_instance) + res = runner.run(nb_episode=nn_episode) + +Customize the scenarios +************************** + +You can customize the seeds, the scenarios ID you want, the number of initial steps to skip, the +maximum duration of an episode etc. For more information, please refer to the :func:`Runner.run` +for more information. But basically, you can do: + +.. code-block:: python + + import grid2op + from grid2op.Agent import RandomAgent # for example... 
+ from grid2op.Runner import Runner + + env = grid2op.make("l2rpn_case14_sandbox") + + agent_instance = RandomAgent(env.action_space) + runner = Runner(**env.get_params_for_runner(), agentClass=None, agentInstance=agent_instance) + res = runner.run(nb_episode=nn_episode, + + # nb process to use + nb_process=1, + + # path where the outcome will be saved + path_save=None, + + # max number of steps in an environment + max_iter=None, + + # progress bar to use + pbar=False, + + # seeds to use for the environment + env_seeds=None, + + # seeds to use for the agent + agent_seeds=None, + + # id the time serie to use + episode_id=None, + + # whether to add the outcome (EpisodeData) as a result of this function + add_detailed_output=False, + + # whether to keep track of the number of call to "high resolution simulator" + # (eg obs.simulate or obs.get_forecasted_env) + add_nb_highres_sim=False, + + # which initial state you want the grid to be in + init_states=None, + + # options passed in `env.reset(..., options=XXX)` + reset_options=None, + ) + + +Retrieve what has happened +**************************** + +You can also easily retrieve the :class:`grid2op.Episode.EpisodeData` representing your runs with: + +.. code-block:: python + + import grid2op + from grid2op.Agent import RandomAgent # for example... + from grid2op.Runner import Runner + + env = grid2op.make("l2rpn_case14_sandbox") + + agent_instance = RandomAgent(env.action_space) + runner = Runner(**env.get_params_for_runner(), agentClass=None, agentInstance=agent_instance) + res = runner.run(nb_episode=2, + add_detailed_output=True) + for *_, ep_data in res: + # ep_data are the EpisodeData you can use to do whatever + ... + +Save the results +***************** + +You can save the results in a standardized format with: + +.. code-block:: python + + import grid2op + from grid2op.Agent import RandomAgent # for example... 
+ from grid2op.Runner import Runner + + env = grid2op.make("l2rpn_case14_sandbox") + + agent_instance = RandomAgent(env.action_space) + runner = Runner(**env.get_params_for_runner(), + agentClass=None, + agentInstance=agent_instance) + res = runner.run(nb_episode=2, + save_path="A/PATH/SOMEWHERE") # eg "/home/user/you/grid2op_results/this_run" + +Multi processing +*********************** + +You can also easily (on some platform) easily make the evaluation faster by using the "multi processing" python +package with: + +.. code-block:: python + + import grid2op + from grid2op.Agent import RandomAgent # for example... + from grid2op.Runner import Runner + + env = grid2op.make("l2rpn_case14_sandbox") + + agent_instance = RandomAgent(env.action_space) + runner = Runner(**env.get_params_for_runner(), + agentClass=None, + agentInstance=agent_instance) + res = runner.run(nb_episode=2, + nb_process=2) + +Customize the multi processing +******************************** + +And, as of grid2op 1.10.3 you can know customize the multi processing context you want +to use to evaluate your agent, like this: + +.. code-block:: python + + import multiprocessing as mp + import grid2op + from grid2op.Agent import RandomAgent # for example... + from grid2op.Runner import Runner + + env = grid2op.make("l2rpn_case14_sandbox") + + agent_instance = RandomAgent(env.action_space) + + ctx = mp.get_context('spawn') # or "fork" or "forkserver" + runner = Runner(**env.get_params_for_runner(), + agentClass=None, + agentInstance=agent_instance, + mp_context=ctx) + res = runner.run(nb_episode=2, + nb_process=2) + +If you set this, the multiprocessing `Pool` used to evaluate your agents will be made with: + +.. code-block:: python + + with mp_context.Pool(nb_process) as p: + .... + +Otherwise the default "Pool" is used: + +.. code-block:: python + + with Pool(nb_process) as p: + .... 
+ Detailed Documentation by class ------------------------------- diff --git a/docs/timeserie_handlers.rst b/docs/timeserie_handlers.rst index e7a9b1fb5..bd76abddf 100644 --- a/docs/timeserie_handlers.rst +++ b/docs/timeserie_handlers.rst @@ -344,3 +344,5 @@ Detailed Documentation by class .. automodule:: grid2op.Chronics.handlers :members: :autosummary: + +.. include:: final.rst diff --git a/docs/troubleshoot.rst b/docs/troubleshoot.rst new file mode 100644 index 000000000..fbfec1fc4 --- /dev/null +++ b/docs/troubleshoot.rst @@ -0,0 +1,189 @@ + +.. _troubleshoot_page: + +Known issues and workarounds +=============================== + + +In this section we will detail what are the common questions we have regarding grid2op and how to +best solve them (if we are aware of such a way...) + +.. _troubleshoot_pickle: + +Pickle issues +-------------------------- + +The most common (and oldest) issue regarding grid2op is its interaction with the `pickle` module +in python. + +This module is used internally by the `multiprocessing` module and many others. + +By default (and "by design") grid2op will create the classes when an environment +is loaded. You can notice it like this: + +.. code-block:: python + + import grid2op + + env_name = "l2rpn_case14_sandbox" + env = grid2op.make(env_name) + + print(type(env)) + +This will show something like `Environment_l2rpn_case14_sandbox`. This means that, +not only the object `env` is created when you call `grid2op.make` but also +the class that `env` belongs too (in this case `Environment_l2rpn_case14_sandbox`). + +.. note:: + We decided to adopt this design so that the powergrid reprensentation in grid2op + is not copied and can be access pretty easily from pretty much every objects. 
+ + For example you can call `env.n_gen`, `type(env).n_gen`, `env.backend.n_gen`, + `type(env.backend).n_gen`, `obs.n_gen`, `type(obs).n_gen`, `act.n_gen`, + `type(act).n_gen`, `env.observation_space.n_gen`, `type(env.observation_space).n_gen` + well... you get the idea + + But allowing so makes it "hard" for python to understand how to transfer objects + from one "process" to another or to save / restore it (indeed, python does not + save the entire class definition it only saves the class names.) + +This type of issue takes the form of an error with: + +- `XXX_env_name` (*eg* `CompleteObservation_l2rpn_wcci_2022`) is not serializable. +- `_pickle.PicklingError`: Can't pickle : attribute lookup _ObsEnv_l2rpn_case14_sandbox on abc failed + +Automatic 'class_in_file' ++++++++++++++++++++++++++++ + +To solve this issue, we are starting from grid2op 1.10 to introduce some ways +to get around this automatically. It will be integrated incrementally to make +sure not to break any previous code. + +The main idea is that grid2op will define the class as it used to (no change there) +but instead of keeping them "in memory" it will write it on the hard drive (in +a folder within the environment data) each time an environment is created. + +This way, when pickle or multiprocessing will attempt to load the environment class, +they will be able to because the files are stored on the hard drive. + +There are some drawbacks of course. The main one being that creating an environment +can take a bit more time (especially if you have slow I/O). It will also use +a bit of disk space (a few kB so nothing to worry about). + +For now we tested it on multi processing and it gives promising results. + +**TL;DR**: Enable this feature by calling `grid2op.make(env_name, class_in_file=True)` and you're good to go. 
+
+To enable this, you can:
+
+- define a default behaviour by editing the `~/.grid2opconfig.json` global parameters
+- define the environment variable `grid2op_class_in_file` **BEFORE** importing grid2op
+- use the kwargs `class_in_file` when calling the `grid2op.make` function
+
+.. note::
+    In case of "conflicting" instruction grid2op will do the following:
+
+    - if `class_in_file` is provided in the call to `grid2op.make(...)` it will use this and ignore everything else
+    - (else) if the environment variable `grid2op_class_in_file` is defined, grid2op will use it
+    - (else) if the configuration file is present and the key `class_in_file` is there, grid2op will
+      use it
+    - (else) it will use its default behaviour (as of writing, grid2op 1.10.3) it is to **DEACTIVATE**
+      this feature (in the near future the default will change and it will be activated by default)
+
+For example:
+
+The file `~/.grid2opconfig.json` can look like:
+
+.. code-block:: json
+
+    {
+        "class_in_file" : false
+    }
+
+or
+
+.. code-block:: json
+
+    {
+        "class_in_file" : true
+    }
+
+If you prefer to work with environment variables, we recommend you do something like:
+
+.. code-block:: python
+
+    import os
+
+    os.environ["grid2op_class_in_file"] = "true"  # or "false" if you want to disable it
+
+    import grid2op
+
+And if you prefer to use it directly in the `grid2op.make(...)` function, you can do it with:
+
+.. code-block:: python
+
+    import grid2op
+    env_name = "l2rpn_case14_sandbox"
+    env = grid2op.make(env_name, class_in_file=True)  # or `class_in_file=False`
+
+
+If you want to know if your environment has used this new feature, you can check with:
+
+.. code-block:: python
+
+    import grid2op
+    env = grid2op.make(...)
+    print(env.classes_are_in_files())
+
+.. danger::
+    If you use this, make sure (for now) that the original grid2op environment that you have created
+    is not deleted.
If that is the case then the folder containing the classes definition will be
+    removed and you might not be able to work with grid2op correctly.
+
+
+Experimental `read_from_local_dir`
++++++++++++++++++++++++++++++++++++
+
+Before grid2op 1.10.3 the only way to get around pickle / multiprocessing issue was a "two stage" process:
+you had first to tell grid2op to generate the classes and then to tell it to use it in all future environment.
+
+This had the drawbacks that if you changed the backend classes, or the observation classes or the
+action classes, you needed to start the whole process again. And it was manual, so you might have ended up
+doing some unintended actions which could create some "silent bugs" (the worst kind, like for example
+not using the right class...)
+
+To do it you first needed to call, once (as long as you did not change backend class or observation or action etc.)
+in a **SEPARATE** python script:
+
+.. code-block:: python
+
+    import grid2op
+    env_name = "l2rpn_case14_sandbox"  # or any other name
+
+    env = grid2op.make(env_name, ...)  # again: redo this step each time you customize "..."
+    # for example if you change the `action_class` or the `backend` etc.
+
+    env.generate_classes()
+
+
+And then, in another script, the main one you want to use:
+
+.. code-block:: python
+
+    import grid2op
+    env_name = SAME NAME AS ABOVE
+    env = grid2op.make(env_name,
+                       experimental_read_from_local_dir=True,
+                       SAME ENV CUSTOMIZATION AS ABOVE)
+
+As of grid2op 1.10.3 this process can be made automatically (not without some drawbacks, see above).
It might +interact in a weird (and unpredictable) way with the `class_in_file` so we would recommend to use one **OR** +(exclusive OR, XOR for the mathematicians) the other but avoid mixing the two: + +- either use `grid2op.make(..., class_in_file=True)` +- or use `grid2op.make(..., experimental_read_from_local_dir=True)` + +Thus we **DO NOT** recommend to use something like +`grid2op.make(..., experimental_read_from_local_dir=True, class_in_file=True)` + + +.. include:: final.rst diff --git a/docs/utils.rst b/docs/utils.rst index fde3a084a..d30de21a1 100644 --- a/docs/utils.rst +++ b/docs/utils.rst @@ -22,4 +22,3 @@ Detailed Documentation by class :autosummary: .. include:: final.rst - diff --git a/docs/voltagecontroler.rst b/docs/voltagecontroler.rst index 19e391297..1c85a3552 100644 --- a/docs/voltagecontroler.rst +++ b/docs/voltagecontroler.rst @@ -41,4 +41,4 @@ Detailed Documentation by class :members: :autosummary: -.. include:: final.rst \ No newline at end of file +.. include:: final.rst diff --git a/examples/backend_integration/Step0_make_env.py b/examples/backend_integration/Step0_make_env.py index cc0d45b60..5d91fbdeb 100644 --- a/examples/backend_integration/Step0_make_env.py +++ b/examples/backend_integration/Step0_make_env.py @@ -41,6 +41,28 @@ from grid2op.Opponent import BaseOpponent +class PandaPowerBackendNoShunt(PandaPowerBackend): + shunts_data_available = False + + +def create_action(env, backend, action): + """this is done internally by grid2op. 
+ + The idea is to generate a "backend action" (which again is provided by grid2op) + easily + """ + # bk_act = env._backend_action_class() + # bk_act += action # action for pandapower backend + # bk_act.reorder(env.backend._load_sr2tg, + # env.backend._gen_sr2tg, + # env.backend._topo_sr2tg, + # env.backend._storage_sr2tg, + # env.backend._shunt_sr2tg) + bk_act = type(backend).my_bk_act_class() + bk_act += action + return bk_act + + def make_env_for_backend(env_name, backend_class): # env_name: one of: # - rte_case5_example: the grid in the documentation (completely fake grid) @@ -65,8 +87,9 @@ def make_env_for_backend(env_name, backend_class): action_class=CompleteAction, # we tell grid2op we will manipulate all type of actions reward_class=ConstantReward, # we don't have yet redispatching data, that might be use by the reward opponent_class=BaseOpponent, # we deactivate the opponents + # backend=backend_class() backend=BackendConverter(source_backend_class=backend_class, - target_backend_class=PandaPowerBackend, + target_backend_class=PandaPowerBackendNoShunt, use_target_backend_name=True) ) obs = env.reset() diff --git a/examples/backend_integration/Step1_loading.py b/examples/backend_integration/Step1_loading.py index 4775ba85d..ac4612169 100644 --- a/examples/backend_integration/Step1_loading.py +++ b/examples/backend_integration/Step1_loading.py @@ -34,6 +34,7 @@ class CustomBackend_Step1(Backend): + shunts_data_available = False def load_grid(self, path : Union[os.PathLike, str], filename : Optional[Union[os.PathLike, str]]=None) -> None: @@ -170,8 +171,8 @@ def lines_ex_info(self)-> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]: # storage_pos_topo_vect # for example - print(type(backend).name_load) - print(type(backend).load_to_subid) - print(type(backend).load_to_sub_pos) - print(type(backend).load_pos_topo_vect) + print(f"Name of the loads, seen in grid2op: {type(backend).name_load}") + print(f"Id of substation, for each load: 
{type(backend).load_to_subid}") + print(f"Position in the substation topology vector, for each load: {type(backend).load_to_sub_pos}") + print(f"Position in the global topology vector, for each load: {type(backend).load_pos_topo_vect}") \ No newline at end of file diff --git a/examples/backend_integration/Step2_modify_load.py b/examples/backend_integration/Step2_modify_load.py index c55049458..4947af3a6 100644 --- a/examples/backend_integration/Step2_modify_load.py +++ b/examples/backend_integration/Step2_modify_load.py @@ -69,7 +69,7 @@ def loads_info(self)-> Tuple[np.ndarray, np.ndarray, np.ndarray]: if __name__ == "__main__": import grid2op import os - from Step0_make_env import make_env_for_backend + from Step0_make_env import make_env_for_backend, create_action path_grid2op = grid2op.__file__ path_data_test = os.path.join(os.path.split(path_grid2op)[0], "data") @@ -105,8 +105,7 @@ def loads_info(self)-> Tuple[np.ndarray, np.ndarray, np.ndarray]: # have the proper size) # this is technical to grid2op (done internally) - bk_act = env._backend_action_class() - bk_act += action + bk_act = create_action(env, backend, action) ############# # this is what the backend receive: diff --git a/examples/backend_integration/Step3_modify_gen.py b/examples/backend_integration/Step3_modify_gen.py index 8ec174f34..b3d45eddc 100644 --- a/examples/backend_integration/Step3_modify_gen.py +++ b/examples/backend_integration/Step3_modify_gen.py @@ -67,7 +67,7 @@ def generators_info(self)-> Tuple[np.ndarray, np.ndarray, np.ndarray]: if __name__ == "__main__": import grid2op import os - from Step0_make_env import make_env_for_backend + from Step0_make_env import make_env_for_backend, create_action path_grid2op = grid2op.__file__ path_data_test = os.path.join(os.path.split(path_grid2op)[0], "data") @@ -103,8 +103,7 @@ def generators_info(self)-> Tuple[np.ndarray, np.ndarray, np.ndarray]: # have the proper size) # this is technical to grid2op (done internally) - bk_act = 
env._backend_action_class() - bk_act += action + bk_act = create_action(env, backend, action) ############# # this is what the backend receive: diff --git a/examples/backend_integration/Step4_modify_line_status.py b/examples/backend_integration/Step4_modify_line_status.py index e4e7c5057..3fabdb5c6 100644 --- a/examples/backend_integration/Step4_modify_line_status.py +++ b/examples/backend_integration/Step4_modify_line_status.py @@ -178,7 +178,7 @@ def lines_ex_info(self)-> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]: if __name__ == "__main__": import grid2op import os - from Step0_make_env import make_env_for_backend + from Step0_make_env import make_env_for_backend, create_action path_grid2op = grid2op.__file__ path_data_test = os.path.join(os.path.split(path_grid2op)[0], "data") @@ -205,8 +205,7 @@ def lines_ex_info(self)-> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]: action = env.action_space({"set_line_status": [(0, -1)]}) # this is technical to grid2op - bk_act = env._backend_action_class() - bk_act += action + bk_act = create_action(env, backend, action) ############# # this is what the backend receive: diff --git a/examples/backend_integration/Step5_modify_topology.py b/examples/backend_integration/Step5_modify_topology.py index c582aae9d..4e84a58e7 100644 --- a/examples/backend_integration/Step5_modify_topology.py +++ b/examples/backend_integration/Step5_modify_topology.py @@ -58,7 +58,7 @@ def _aux_change_bus_or_disconnect(self, new_bus, dt, key, el_id): # are either 1 or 2) def apply_action(self, backendAction: Union["grid2op.Action._backendAction._BackendAction", None]) -> None: # the following few lines are highly recommended - if action is None: + if backendAction is None: return # loads and generators are modified in the previous script @@ -173,12 +173,12 @@ def get_topo_vect(self) -> np.ndarray: if __name__ == "__main__": import grid2op import os - from Step0_make_env import make_env_for_backend + from Step0_make_env import 
make_env_for_backend, create_action path_grid2op = grid2op.__file__ path_data_test = os.path.join(os.path.split(path_grid2op)[0], "data") - env_name = "l2rpn_wcci_2022_dev" + env_name = "rte_case5_example" # one of: # - rte_case5_example: the grid in the documentation (completely fake grid) # - l2rpn_case14_sandbox: inspired from IEEE 14 @@ -206,16 +206,16 @@ def get_topo_vect(self) -> np.ndarray: sub_id = 1 local_topo = (1, 2, 1, 2, 1, 2) elif env_name == "l2rpn_wcci_2022_dev": - sub_id = 3 - local_topo = (1, 2, 1, 2, 1) + raise RuntimeError("Storage units are not handled by the example backend, and there are some on the grid.") + # sub_id = 3 + # local_topo = (1, 2, 1, 2, 1) else: raise RuntimeError(f"Unknown grid2op environment name {env_name}") action = env.action_space({"set_bus": {"substations_id": [(sub_id, local_topo)]}}) ############################# # this is technical to grid2op - bk_act = env._backend_action_class() - bk_act += action + bk_act = create_action(env, backend, action) #################################### # this is what the backend receive: diff --git a/examples/backend_integration/Step6_integration.py b/examples/backend_integration/Step6_integration.py index 7518504b3..f17ff0cbf 100644 --- a/examples/backend_integration/Step6_integration.py +++ b/examples/backend_integration/Step6_integration.py @@ -12,7 +12,7 @@ interacts with it. 
""" - +from tqdm import tqdm from Step5_modify_topology import CustomBackend_Minimal @@ -60,11 +60,13 @@ ########### First "test" perform nothing and see what it gives done = False nb_step = 0 - while True: - obs, reward, done, info = env.step(env.action_space()) - if done: - break - nb_step += 1 + with tqdm() as pbar: + while True: + obs, reward, done, info = env.step(env.action_space()) + if done: + break + nb_step += 1 + pbar.update() print(f"{nb_step} steps have been made with your backend with do nothing") ########## Second "test" perform random actions every now and then @@ -72,18 +74,20 @@ obs = env.reset() done = False nb_step = 0 - while True: - if nb_step % 10 == 9: - # do a randome action sometime - act = env.action_space.sample() - else: - # do nothing most of the time - act = env.action_space() - obs, reward, done, info = env.step(act) - if done: - break - nb_step += 1 - print(f"{nb_step} steps have been made with your backend with random actions") + with tqdm() as pbar: + while True: + if nb_step % 10 == 9: + # do a randome action sometime + act = env.action_space.sample() + else: + # do nothing most of the time + act = env.action_space() + obs, reward, done, info = env.step(act) + if done: + break + nb_step += 1 + pbar.update() + print(f"{nb_step} steps have been made with your backend with some random actions") ########### Third "test" using an "agent" that "does smart actions" (greedy agent) done = False @@ -91,11 +95,13 @@ obs = env.reset() reward = 0. 
agent = RecoPowerlineAgent(env.action_space) - while True: - act = agent.act(obs, reward) - obs, reward, done, info = env.step(act) - if done: - break - nb_step += 1 + with tqdm() as pbar: + while True: + act = agent.act(obs, reward) + obs, reward, done, info = env.step(act) + if done: + break + nb_step += 1 + pbar.update() print(f"{nb_step} steps have been made with the greedy agent") \ No newline at end of file diff --git a/examples/backend_integration/Step7_optional_make_test.py b/examples/backend_integration/Step7_optional_make_test.py new file mode 100644 index 000000000..579b5b2bf --- /dev/null +++ b/examples/backend_integration/Step7_optional_make_test.py @@ -0,0 +1,87 @@ +# Copyright (c) 2019-2020, RTE (https://www.rte-france.com) +# See AUTHORS.txt +# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0. +# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file, +# you can obtain one at http://mozilla.org/MPL/2.0/. +# SPDX-License-Identifier: MPL-2.0 +# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems. + +""" +This script provides a way to run the tests performed by grid2Op for the backend. + +These tests are not 100% complete (some things might not be tested and are tested somewhere else) +but they cover a big part of what the backend is expected to do. + +YOU NEED TO INSTALL GRID2OP FROM THE GITHUB REPO FOR THIS TO WORK ! 
+To do that, simply: + +1) clone grid2op repo +2) cd there +3) run `pip install -e .` + +(do this in a venv preferably) +""" + +import unittest +import warnings + +# first the backend class (for the example here) +from Step5_modify_topology import CustomBackend_Minimal + +# then some required things +from grid2op.tests.helper_path_test import PATH_DATA_TEST_PP, PATH_DATA_TEST +from grid2op.tests.helper_path_test import HelperTests +PATH_DATA_TEST_INIT = PATH_DATA_TEST +PATH_DATA_TEST = PATH_DATA_TEST_PP + +# then all the tests that can be automatically performed +from grid2op.tests.BaseBackendTest import BaseTestNames, BaseTestLoadingCase, BaseTestLoadingBackendFunc +from grid2op.tests.BaseBackendTest import BaseTestTopoAction, BaseTestEnvPerformsCorrectCascadingFailures +from grid2op.tests.BaseBackendTest import BaseTestChangeBusAffectRightBus, BaseTestShuntAction +from grid2op.tests.BaseBackendTest import BaseTestResetEqualsLoadGrid, BaseTestVoltageOWhenDisco, BaseTestChangeBusSlack +from grid2op.tests.BaseBackendTest import BaseIssuesTest, BaseStatusActions +from grid2op.tests.test_Environment import (TestLoadingBackendPandaPower as BaseTestLoadingBackendPandaPower, + TestResetOk as BaseTestResetOk) +from grid2op.tests.test_Environment import (TestResetAfterCascadingFailure as TestResetAfterCascadingFailure, + TestCascadingFailure as BaseTestCascadingFailure) +from grid2op.tests.BaseRedispTest import BaseTestRedispatch, BaseTestRedispatchChangeNothingEnvironment +from grid2op.tests.BaseRedispTest import BaseTestRedispTooLowHigh, BaseTestDispatchRampingIllegalETC +from grid2op.tests.BaseRedispTest import BaseTestLoadingAcceptAlmostZeroSumRedisp + +# then still some glue code, mainly for the names of the time series +from grid2op.Converter import BackendConverter +from grid2op.Backend import PandaPowerBackend + +# our backend does not read the names from the grid, so this test is not relevant +# class TestNames(HelperTests, BaseTestNames): +# def 
make_backend(self, detailed_infos_for_cascading_failures=False): +# with warnings.catch_warnings(): +# warnings.filterwarnings("ignore") +# bk = BackendConverter(source_backend_class=CustomBackend_Minimal, +# target_backend_class=PandaPowerBackend, +# use_target_backend_name=True, +# detailed_infos_for_cascading_failures=detailed_infos_for_cascading_failures) +# return bk + +# def get_path(self): +# return PATH_DATA_TEST_INIT + +class TestLoadingCase(HelperTests, BaseTestLoadingCase): + def make_backend(self, detailed_infos_for_cascading_failures=False): + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + bk = BackendConverter(source_backend_class=CustomBackend_Minimal, + target_backend_class=PandaPowerBackend, + use_target_backend_name=True, + detailed_infos_for_cascading_failures=detailed_infos_for_cascading_failures) + return bk + + def get_path(self): + return PATH_DATA_TEST + + def get_casefile(self): + return "test_case14.json" + + +if __name__ == "__main__": + unittest.main() diff --git a/getting_started/00_Introduction.ipynb b/getting_started/00_Introduction.ipynb index bf16a50db..a58f23b7b 100644 --- a/getting_started/00_Introduction.ipynb +++ b/getting_started/00_Introduction.ipynb @@ -191,10 +191,25 @@ "\n", "- In reality there can also be \"switches\" that can connect the two busbars (reconfiguring the topology of the substation can be done with only one switch, but on the other hand, sometimes changing one switch will have no effect at all).\n", "\n", - "- You can also have more than 2 busbars in each substation (sometimes 5 or 6 for example). This makes the number of possible topologies even higher than what it is in grid2op.\n", + "- You can also have more than 2 busbars in each substation (sometimes 5 or 6 for example). 
This makes the number of possible topologies even higher than it currently is in grid2op (see below for some additional precisions).\n", "\n", "- Finally, most of the time a single busbar count a \"switch\" in its middle that allows to disconnect part of the element connected to it to another part. Basically this entails that some combinaison of elements are not possible to perform\n", "\n", + "*Additional precisions about the number of independant busbsars per susbtations*: Starting from grid2op 1.10.2 you can now have any number of busbars you want per susbtations. For example, you can create an environment with:\n", + "```python\n", + "env = grid2op.make(\"l2rpn_case14_sandbox\")\n", + "```\n", + "To have the default of 2 busbars per susbtations. But you can also do:\n", + "```python\n", + "env_3 = grid2op.make(\"l2rpn_case14_sandbox\", n_busbar=3)\n", + "```\n", + "Then you end-up with 3 busbars for all substations or you can even do:\n", + "```python\n", + "env_1 = grid2op.make(\"l2rpn_case14_sandbox\", n_busbar=1)\n", + "# or\n", + "env_10 = grid2op.make(\"l2rpn_case14_sandbox\", n_busbar=10)\n", + "```\n", + "\n", "And of course, we model explicitly in this framework (*eg* we allow the agents to act on) only some elements of a powergrid. In reality, much more heterogeneous objects exists with more complex properties. \n", "\n", "We decided to make all these assumptions because we thought it was the easiest setting that allow to perform some topological reconfiguration, beside connecting / disconnecting powerlines.\n", diff --git a/getting_started/11_IntegrationWithExistingRLFrameworks.ipynb b/getting_started/11_IntegrationWithExistingRLFrameworks.ipynb index 561dd20b1..e4d2c6ecb 100644 --- a/getting_started/11_IntegrationWithExistingRLFrameworks.ipynb +++ b/getting_started/11_IntegrationWithExistingRLFrameworks.ipynb @@ -29,7 +29,6 @@ "\n", "Other RL frameworks are not cover here. 
If you already use them, let us know !\n", "- https://github.com/PaddlePaddle/PARL/blob/develop/README.md (used by the winner teams of Neurips competitions !) Work in progress.\n", - "- https://github.com/wau/keras-rl2\n", "- https://github.com/deepmind/acme\n", "\n", "Note also that there is still the possibility to use past codes in the l2rpn-baselines repository: https://github.com/rte-france/l2rpn-baselines . This repository contains code snippets that can be reuse to make really nice agents on the l2rpn competitions. You can try it out :-) \n", @@ -85,11 +84,13 @@ "- [Action space](#Action-space): basic usage of the action space, by removing redundant feature (`gym_env.observation_space.ignore_attr`) or transforming feature from a continuous space to a discrete space (`ContinuousToDiscreteConverter`)\n", "- [Observation space](#Observation-space): basic usage of the observation space, by removing redunddant features (`keep_only_attr`) or to scale the data on between a certain range (`ScalerAttrConverter`)\n", "- [Making the grid2op agent](#Making-the-grid2op-agent) explains how to make a grid2op agent once trained. Note that a more \"agent focused\" view is provided in the notebook [04_TrainingAnAgent](04_TrainingAnAgent.ipynb) !\n", - "- [1) RLLIB](#1\\)-RLLIB): more advance usage for customizing the observation space (`gym_env.observation_space.reencode_space` and `gym_env.observation_space.add_key`) or modifying the type of gym attribute (`MultiToTupleConverter`) as well as an example of how to use RLLIB framework\n", - "- [2)-Stable baselines](#2\\)-Stable-baselines): even more advanced usage for customizing the observation space by concatenating it to a single \"Box\" (instead of a dictionnary) thanks to `BoxGymObsSpace` and to use `BoxGymActSpace` if you are more focus on continuous actions and `MultiDiscreteActSpace` for discrete actions (**NB** in both case there will be loss of information as compared to regular grid2op actions! 
for example it will be harder to have a representation of the graph of the grid there)\n", - "- [3) Tf Agents](#3\\)-Tf-Agents) explains how to convert the action space into a \"Discrete\" gym space thanks to `DiscreteActSpace`\n", "\n", - "On each sections, we also explain concisely how to train the agent. Note that we did not spend any time on customizing the default agents and training scheme. It is then less than likely that these agents there" + "To dive deeper and with proper \"hands on\", you can refer to one of the following notebooks that uses real RL frameworks:\n", + "\n", + "1) RLLIB: see notebook [11_ray_integration](./11_ray_integration.ipynb) for more information about RLLIB\n", + "2) Stable baselines: see notebook [11_ray_integration](./11_stable_baselines3_integration.ipynb) for more information about stables-baselines3\n", + "3) tf agents: coming soon\n", + "4) acme: coming soon" ] }, { @@ -173,7 +174,6 @@ "We strongly encouraged you to leverage all the possibilities which includes (but are not limited to):\n", "- using \"lightsim2grid\" as a backend for a 10-15x speed up in the \"env.step(...)\" function\n", "- using \"MultifolderWithCache\" or \"env.chronics_handler.set_chunk(...)\" for faster \"env.reset(...)\" see https://grid2op.readthedocs.io/en/latest/environment.html#optimize-the-data-pipeline\n", - "- using \"SingleEnvMultiProcess\" for parrallel computation\n", "\n", "\n", "### Create a grid2op environment\n", @@ -438,6 +438,133 @@ "env_gym.observation_space" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "For the next notebooks, we use the following environment wrapper:\n", + "\n", + "```python\n", + "from gymnasium import Env\n", + "from gymnasium.spaces import Discrete, MultiDiscrete, Box\n", + "import json\n", + "\n", + "import ray\n", + "from ray.rllib.algorithms.ppo import PPOConfig\n", + "from ray.rllib.algorithms import ppo\n", + "\n", + "from typing import Dict, Literal, Any\n", + "import copy\n", + "\n", 
+ "import grid2op\n", + "from grid2op.gym_compat import GymEnv, BoxGymObsSpace, DiscreteActSpace, BoxGymActSpace, MultiDiscreteActSpace\n", + "from lightsim2grid import LightSimBackend\n", + "\n", + "\n", + "class Grid2opEnvWrapper(Env):\n", + " def __init__(self,\n", + " env_config: Dict[Literal[\"backend_cls\",\n", + " \"backend_options\",\n", + " \"env_name\",\n", + " \"env_is_test\",\n", + " \"obs_attr_to_keep\",\n", + " \"act_type\",\n", + " \"act_attr_to_keep\"],\n", + " Any]= None):\n", + " super().__init__()\n", + " if env_config is None:\n", + " env_config = {}\n", + "\n", + " # handle the backend\n", + " backend_cls = LightSimBackend\n", + " if \"backend_cls\" in env_config:\n", + " backend_cls = env_config[\"backend_cls\"]\n", + " backend_options = {}\n", + " if \"backend_options\" in env_config:\n", + " backend_options = env_config[\"backend_options\"]\n", + " backend = backend_cls(**backend_options)\n", + "\n", + " # create the grid2op environment\n", + " env_name = \"l2rpn_case14_sandbox\"\n", + " if \"env_name\" in env_config:\n", + " env_name = env_config[\"env_name\"]\n", + " if \"env_is_test\" in env_config:\n", + " is_test = bool(env_config[\"env_is_test\"])\n", + " else:\n", + " is_test = False\n", + " self._g2op_env = grid2op.make(env_name, backend=backend, test=is_test)\n", + " # NB by default this might be really slow (when the environment is reset)\n", + " # see https://grid2op.readthedocs.io/en/latest/data_pipeline.html for maybe 10x speed ups !\n", + " # TODO customize reward or action_class for example !\n", + "\n", + " # create the gym env (from grid2op)\n", + " self._gym_env = GymEnv(self._g2op_env)\n", + "\n", + " # customize observation space\n", + " obs_attr_to_keep = [\"rho\", \"p_or\", \"gen_p\", \"load_p\"]\n", + " if \"obs_attr_to_keep\" in env_config:\n", + " obs_attr_to_keep = copy.deepcopy(env_config[\"obs_attr_to_keep\"])\n", + " self._gym_env.observation_space.close()\n", + " self._gym_env.observation_space = 
BoxGymObsSpace(self._g2op_env.observation_space,\n", + " attr_to_keep=obs_attr_to_keep\n", + " )\n", + " # export observation space for the Grid2opEnv\n", + " self.observation_space = Box(shape=self._gym_env.observation_space.shape,\n", + " low=self._gym_env.observation_space.low,\n", + " high=self._gym_env.observation_space.high)\n", + "\n", + " # customize the action space\n", + " act_type = \"discrete\"\n", + " if \"act_type\" in env_config:\n", + " act_type = env_config[\"act_type\"]\n", + "\n", + " self._gym_env.action_space.close()\n", + " if act_type == \"discrete\":\n", + " # user wants a discrete action space\n", + " act_attr_to_keep = [\"set_line_status_simple\", \"set_bus\"]\n", + " if \"act_attr_to_keep\" in env_config:\n", + " act_attr_to_keep = copy.deepcopy(env_config[\"act_attr_to_keep\"])\n", + " self._gym_env.action_space = DiscreteActSpace(self._g2op_env.action_space,\n", + " attr_to_keep=act_attr_to_keep)\n", + " self.action_space = Discrete(self._gym_env.action_space.n)\n", + " elif act_type == \"box\":\n", + " # user wants continuous action space\n", + " act_attr_to_keep = [\"redispatch\", \"set_storage\", \"curtail\"]\n", + " if \"act_attr_to_keep\" in env_config:\n", + " act_attr_to_keep = copy.deepcopy(env_config[\"act_attr_to_keep\"])\n", + " self._gym_env.action_space = BoxGymActSpace(self._g2op_env.action_space,\n", + " attr_to_keep=act_attr_to_keep)\n", + " self.action_space = Box(shape=self._gym_env.action_space.shape,\n", + " low=self._gym_env.action_space.low,\n", + " high=self._gym_env.action_space.high)\n", + " elif act_type == \"multi_discrete\":\n", + " # user wants a multi-discrete action space\n", + " act_attr_to_keep = [\"one_line_set\", \"one_sub_set\"]\n", + " if \"act_attr_to_keep\" in env_config:\n", + " act_attr_to_keep = copy.deepcopy(env_config[\"act_attr_to_keep\"])\n", + " self._gym_env.action_space = MultiDiscreteActSpace(self._g2op_env.action_space,\n", + " attr_to_keep=act_attr_to_keep)\n", + " self.action_space = 
MultiDiscrete(self._gym_env.action_space.nvec)\n", + " else:\n", + " raise NotImplementedError(f\"action type '{act_type}' is not currently supported.\")\n", + " \n", + " def reset(self, seed=None, options=None):\n", + " # use default _gym_env (from grid2op.gym_compat module)\n", + " # NB: here you can also specify \"default options\" when you reset, for example:\n", + " # - limiting the duration of the episode \"max step\"\n", + " # - starting at different steps \"init ts\"\n", + " # - study difficult scenario \"time serie id\"\n", + " # - specify an initial state of your grid \"init state\"\n", + " return self._gym_env.reset(seed=seed, options=options)\n", + " \n", + " def step(self, action):\n", + " # use default _gym_env (from grid2op.gym_compat module)\n", + " return self._gym_env.step(action)\n", + " \n", + "```\n", + "\n" + ] + }, { "cell_type": "markdown", "metadata": {}, @@ -486,260 +613,9 @@ "source": [ "## 1) RLLIB\n", "\n", - "This part is not a tutorial on how to use rllib. Please refer to [their documentation](https://docs.ray.io/en/master/rllib.html) for more detailed information.\n", - "\n", - "As explained in the header of this notebook, we will follow the recommended usage:\n", - "1. Create a grid2op environment (see section [0) Recommended initial steps](#0\\)-Recommended-initial-steps))\n", - "2. Convert it to a gym environment (see section [0) Recommended initial steps](#0\\)-Recommended-initial-steps))\n", - "3. (optional) Customize the action space and observation space (see section [0) Recommended initial steps](#0\\)-Recommended-initial-steps))\n", - "4. Use the framework to train an agent **(only this part is framework specific)**\n", - "\n", - "\n", - "The issue with rllib is that it does not take into account MultiBinary nor MultiDiscrete action space (see \n", - "see https://github.com/ray-project/ray/issues/1519) so we need some way to encode these types of actions. 
This can be done automatically with the `MultiToTupleConverter` provided in grid2op (as always, more information [in the documentation](https://grid2op.readthedocs.io/en/latest/gym.html#grid2op.gym_compat.MultiToTupleConverter) ).\n", - "\n", - "We will then use this to customize our environment previously defined:\n", - " " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import copy\n", - "env_rllib = copy.deepcopy(env_gym_init)\n", - "from grid2op.gym_compat import MultiToTupleConverter\n", - "env_rllib.action_space = env_rllib.action_space.reencode_space(\"change_bus\", MultiToTupleConverter())\n", - "env_rllib.action_space = env_rllib.action_space.reencode_space(\"change_line_status\", MultiToTupleConverter())\n", - "env_rllib.action_space = env_rllib.action_space.reencode_space(\"redispatch\",\n", - " ContinuousToDiscreteConverter(nb_bins=11)\n", - " )\n", - "env_rllib.action_space = env_rllib.action_space.reencode_space(\"redispatch\", MultiToTupleConverter())\n", - "env_rllib.action_space" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Another specificity of RLLIB is that it handles creation of environments \"on its own\". This implies that you need to create a custom class representing an environment, rather a python object.\n", - "\n", - "And finally, you ask it to use this class, and learn a specific agent. This is really well explained in their documentation: https://docs.ray.io/en/master/rllib-env.html#configuring-environments." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# gym specific, we simply do a copy paste of what we did in the previous cells, wrapping it in the\n", - "# MyEnv class, and train a Proximal Policy Optimisation based agent\n", - "import gymnasium\n", - "import ray\n", - "import numpy as np\n", - " \n", - "class MyEnv(gymnasium.Env):\n", - " def __init__(self, env_config):\n", - " import grid2op\n", - " from grid2op.gym_compat import GymEnv\n", - " from grid2op.gym_compat import ScalerAttrConverter, ContinuousToDiscreteConverter, MultiToTupleConverter\n", - "\n", - " # 1. create the grid2op environment\n", - " if not \"env_name\" in env_config:\n", - " raise RuntimeError(\"The configuration for RLLIB should provide the env name\")\n", - " nm_env = env_config[\"env_name\"]\n", - " del env_config[\"env_name\"]\n", - " self.env_glop = grid2op.make(nm_env, **env_config)\n", - "\n", - " # 2. create the gym environment\n", - " self.env_gym = GymEnv(self.env_glop)\n", - " obs_gym, info = self.env_gym.reset()\n", - "\n", - " # 3. 
(optional) customize it (see section above for more information)\n", - " ## customize action space\n", - " self.env_gym.action_space = self.env_gym.action_space.ignore_attr(\"set_bus\").ignore_attr(\"set_line_status\")\n", - " self.env_gym.action_space = self.env_gym.action_space.reencode_space(\"redispatch\",\n", - " ContinuousToDiscreteConverter(nb_bins=11)\n", - " )\n", - " self.env_gym.action_space = self.env_gym.action_space.reencode_space(\"change_bus\", MultiToTupleConverter())\n", - " self.env_gym.action_space = self.env_gym.action_space.reencode_space(\"change_line_status\",\n", - " MultiToTupleConverter())\n", - " self.env_gym.action_space = self.env_gym.action_space.reencode_space(\"redispatch\", MultiToTupleConverter())\n", - " ## customize observation space\n", - " ob_space = self.env_gym.observation_space\n", - " ob_space = ob_space.keep_only_attr([\"rho\", \"gen_p\", \"load_p\", \"topo_vect\", \"actual_dispatch\"])\n", - " ob_space = ob_space.reencode_space(\"actual_dispatch\",\n", - " ScalerAttrConverter(substract=0.,\n", - " divide=self.env_glop.gen_pmax\n", - " )\n", - " )\n", - " ob_space = ob_space.reencode_space(\"gen_p\",\n", - " ScalerAttrConverter(substract=0.,\n", - " divide=self.env_glop.gen_pmax\n", - " )\n", - " )\n", - " ob_space = ob_space.reencode_space(\"load_p\",\n", - " ScalerAttrConverter(substract=obs_gym[\"load_p\"],\n", - " divide=0.5 * obs_gym[\"load_p\"]\n", - " )\n", - " )\n", - " self.env_gym.observation_space = ob_space\n", - "\n", - " # 4. specific to rllib\n", - " self.action_space = self.env_gym.action_space\n", - " self.observation_space = self.env_gym.observation_space\n", - " \n", - " # 4. 
bis: to avoid other type of issues, we recommend to build the action space and observation\n", - " # space directly from the spaces class.\n", - " d = {k: v for k, v in self.env_gym.observation_space.spaces.items()}\n", - " self.observation_space = gymnasium.spaces.Dict(d)\n", - " a = {k: v for k, v in self.env_gym.action_space.items()}\n", - " self.action_space = gymnasium.spaces.Dict(a)\n", - "\n", - " def reset(self):\n", - " obs = self.env_gym.reset()\n", - " return obs\n", - "\n", - " def step(self, action):\n", - " obs, reward, done, info = self.env_gym.step(action)\n", - " return obs, reward, done, info" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "test = MyEnv({\"env_name\": \"l2rpn_case14_sandbox\"})" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "And now you can train it :" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "if nb_step_train: # remember: don't forge to change this number to perform an actual training !\n", - " from ray.rllib.agents import ppo # import the type of agents\n", - " # nb_step_train = 100 # Do not forget to turn on the actual training !\n", - " # fist initialize ray\n", - " \n", - " try:\n", - " # then define a \"trainer\"\n", - " trainer = ppo.PPOTrainer(env=MyEnv, config={\n", - " \"env_config\": {\"env_name\":\"l2rpn_case14_sandbox\"}, # config to pass to env class\n", - " })\n", - " # and then train it for a given number of iteration\n", - " for step in range(nb_step_train):\n", - " trainer.train()\n", - " finally: \n", - " # shutdown ray\n", - " ray.shutdown()\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Because we are approximating a physical system with real equations, and limited computational power\n", - "regardless of the \"backend\" / \"powergrid simulator\" used internally by grid2op, it is sometimes possible\n", - "that an 
observation obs[\"gen_p\"] is not exactly in the range \n", - "env.observation_space[\"gen_p\"].low, env.observation_space[\"gen_p\"].high.\n", - "\n", - "In this \"pathological\" cases we recommend to manually change the low / high value of the `gen_p` part of the observation space, for example by adding, after the definition of self.observation_space something like:\n", - "\n", - "```python\n", - " # 4. specific to rllib\n", - " self.action_space = self.env_gym.action_space\n", - " self.observation_space = self.env_gym.observation_space\n", - " self.observation_space[\"gen_p\"].low[:] = -np.inf\n", - " self.observation_space[\"gen_p\"].high[:] = np.inf\n", - "```\n", - "\n", - "More information at https://github.com/rte-france/Grid2Op/issues/196\n", - "\n", - "**NB** these cases can be spotted with an error like:\n", - "\n", - "```\n", - "RayTaskError(ValueError): ray::RolloutWorker.par_iter_next() (pid=378, ip=172.28.0.2)\n", - " File \"python/ray/_raylet.pyx\", line 480, in ray._raylet.execute_task\n", - " File \"python/ray/_raylet.pyx\", line 432, in ray._raylet.execute_task.function_executor\n", - " File \"/usr/local/lib/python3.7/dist-packages/ray/util/iter.py\", line 1152, in par_iter_next\n", - " return next(self.local_it)\n", - " File \"/usr/local/lib/python3.7/dist-packages/ray/rllib/evaluation/rollout_worker.py\", line 327, in gen_rollouts\n", - " yield self.sample()\n", - " File \"/usr/local/lib/python3.7/dist-packages/ray/rllib/evaluation/rollout_worker.py\", line 662, in sample\n", - " batches = [self.input_reader.next()]\n", - " File \"/usr/local/lib/python3.7/dist-packages/ray/rllib/evaluation/sampler.py\", line 95, in next\n", - " batches = [self.get_data()]\n", - " File \"/usr/local/lib/python3.7/dist-packages/ray/rllib/evaluation/sampler.py\", line 224, in get_data\n", - " item = next(self.rollout_provider)\n", - " File \"/usr/local/lib/python3.7/dist-packages/ray/rllib/evaluation/sampler.py\", line 620, in _env_runner\n", - " 
sample_collector=sample_collector,\n", - " File \"/usr/local/lib/python3.7/dist-packages/ray/rllib/evaluation/sampler.py\", line 1056, in _process_observations_w_trajectory_view_api\n", - " policy_id).transform(raw_obs)\n", - " File \"/usr/local/lib/python3.7/dist-packages/ray/rllib/models/preprocessors.py\", line 257, in transform\n", - " self.check_shape(observation)\n", - " File \"/usr/local/lib/python3.7/dist-packages/ray/rllib/models/preprocessors.py\", line 68, in check_shape\n", - " observation, self._obs_space)\n", - "ValueError: ('Observation ({}) outside given space ({})!', OrderedDict([('actual_dispatch', array([0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n", - " 0., 0., 0., 0., 0.], dtype=float32)), ('gen_p', array([0. , 0.14583334, 0. , 0.5376 , 0. ,\n", - " 0.13690476, 0. , 0. , 0.13988096, 0. ,\n", - " 0. , 0. , 0. , 0. , 0. ,\n", - " 0. , 0. , 0.10416667, 0. , 0.9975 ,\n", - " 0. , 0.0872582 ], dtype=float32)), ('load_p', array([-8.33333358e-02, 1.27543859e+01, -3.14843726e+00, -4.91228588e-02,\n", - " -7.84314200e-02, 2.70270016e-02, 4.51001197e-01, -7.63358772e-02,\n", - " -8.42104480e-02, -7.90961310e-02, -2.31212564e-02, -7.31706619e-02,\n", - " -5.47945984e-02, -5.57769537e-02, -4.65115122e-02, 0.00000000e+00,\n", - " -6.25000373e-02, -2.98508592e-02, 0.00000000e+00, 2.59741265e-02,\n", - " -5.12821227e-02, 2.12766770e-02, -4.38757129e-02, 1.45455096e-02,\n", - " -1.45278079e-02, -3.63636017e-02, 7.14286715e-02, 1.03358915e-02,\n", - " 8.95522386e-02, 4.81927246e-02, -1.76759213e-02, 1.11111533e-02,\n", - " 1.00000061e-01, -5.28445065e-01, 3.00833374e-01, 7.76839375e-01,\n", - " -7.07498193e-01], dtype=float32)), ('rho', array([0.49652272, 0.42036632, 0.12563582, 0.22375877, 0.54946697,\n", - " 0.08844228, 0.05907034, 0.10975129, 0.13002895, 0.14068729,\n", - " 0.17318982, 0.6956544 , 0.38796344, 0.67179894, 0.22992906,\n", - " 0.25189328, 0.15049867, 0.09095841, 0.35627988, 0.35627988,\n", - " 0.36776555, 0.27249542, 
0.6269728 , 0.62393713, 0.3464659 ,\n", - " 0.35879263, 0.22755426, 0.35994047, 0.36117986, 0.12019955,\n", - " 0.03638522, 0.2805753 , 0.5809281 , 0.6191531 , 0.5243356 ,\n", - " 0.60382956, 0.35834518, 0.35867074, 0.3580954 , 0.6681824 ,\n", - " 0.3441911 , 0.6081861 , 0.34460714, 0.18246886, 0.10307808,\n", - " 0.46778303, 0.47179568, 0.45407027, 0.30089107, 0.30089107,\n", - " 0.34481782, 0.3182735 , 0.35940355, 0.21895139, 0.19766088,\n", - " 0.63653564, 0.46778303, 0.4566811 , 0.64398617], dtype=float32)), ('topo_vect', array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n", - " 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n", - " 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n", - " 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n", - " 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n", - " 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n", - " 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n", - " 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n", - " 1], dtype=int32))]), Dict(actual_dispatch:Box(-1.0, 1.0, (22,), float32), gen_p:Box(0.0, 1.2000000476837158, (22,), float32), load_p:Box(-inf, inf, (37,), float32), rho:Box(0.0, inf, (59,), float32), topo_vect:Box(-1, 2, (177,), int32)))\n", - "```" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "**NB** We want to emphasize here that:\n", - "- This encoding is far from being suitable here. It is shown as an example, mainly to demonstrate the use of some of the gym_compat module\n", - "- The actions in particular are not really suited here. Actions in grid2op are relatively complex and encoding them this way does not seem like a great idea. 
For example, with this encoding, the agent will have to learn that it cannot act on more than 2 lines or two substations at the same time...\n", - "- The \"PPO\" agent shown here, with some default parameters is unlikely to lead to a good agent. You might want to read litterature on past L2RPN agents or draw some inspiration from L2RPN baselines packages for more information.\n", + "To make it easier to get started, we moved this into the notebook [11_ray_integration](./11_ray_integration.ipynb)\n", "\n", - " For a better \"usecase\" of the PPO agent using RLLIB we strongly encourage you to check out the \"PPO_RLLIB\" agent of l2rpn_baselines package. " + "Please have a look at this notebook for more information." ] }, { @@ -748,273 +624,15 @@ "source": [ "## 2) Stable baselines\n", "\n", - "This part is not a tutorial on how to use stable baselines. Please refer to [their documentation](https://stable-baselines3.readthedocs.io/en/master/) for more detailed information.\n", - "\n", - "As explained in the header of this notebook, we will follow the recommended usage:\n", - "1. Create a grid2op environment (see section [0) Recommended initial steps](#0\\)-Recommended-initial-steps))\n", - "2. Convert it to a gym environment (see section [0) Recommended initial steps](#0\\)-Recommended-initial-steps))\n", - "3. (optional) Customize the action space and observation space (see section [0) Recommended initial steps](#0\\)-Recommended-initial-steps))\n", - "4. 
Use the framework to train an agent **(only this part is framework specific)**\n", + "To make it easier to get started, we moved this into the notebook [11_stable_baselines3_integration](./11_stable_baselines3_integration.ipynb)\n", "\n", - "\n", - "The issue with stable beselines 3 is that it expects standard action / observation types as explained there:\n", - "https://stable-baselines3.readthedocs.io/en/master/guide/algos.html#rl-algorithms\n", - "\n", - "> Non-array spaces such as Dict or Tuple are not currently supported by any algorithm.\n", - "\n", - "Unfortunately, it's not possible to convert without any \"loss of information\" an action space of dictionnary type to a vector.\n", - "\n", - "It is possible to use the grid2op framework in such cases, and in this section, we will explain how.\n", - "\n", - "\n", - "First, as always, we convert the grid2op environment in a gym environment." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "env_sb = GymEnv(env_glop) # sb for \"stable baselines\"\n", - "glop_obs = env_glop.reset()" + "Please have a look at this notebook for more information." ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "Then, we need to convert everything into a \"Box\" as it is the only things that stable baselines seems to digest at time of writing (March 20201).\n", - "\n", - "### Observation Space\n", - "\n", - "We explain here how we convert an observation as a single Box. 
This step is rather easy, you just need to specify which attributes of the observation you want to keep and if you want so scale them (with the keword `subtract` and `divide`)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from grid2op.gym_compat import BoxGymObsSpace\n", - "env_sb.observation_space = BoxGymObsSpace(env_sb.init_env.observation_space,\n", - " attr_to_keep=[\"gen_p\", \"load_p\", \"topo_vect\",\n", - " \"rho\", \"actual_dispatch\", \"connectivity_matrix\"],\n", - " divide={\"gen_p\": env_glop.gen_pmax,\n", - " \"load_p\": glop_obs.load_p,\n", - " \"actual_dispatch\": env_glop.gen_pmax},\n", - " functs={\"connectivity_matrix\": (\n", - " lambda grid2obs: grid2obs.connectivity_matrix().flatten(),\n", - " 0., 1., None, None,\n", - " )\n", - " }\n", - " )\n", - "obs_gym, info = env_sb.reset()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "obs_gym in env_sb.observation_space" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "**NB**: the above code is equivalent to something like:\n", - "\n", - "```python\n", - "from gym.spaces import Box\n", - "class BoxGymObsSpaceExample(Box):\n", - " def __init__(self, observation_space)\n", - " shape = observation_space.n_gen + \\ # dimension of gen_p\n", - " observation_space.n_load + \\ # load_p\n", - " observation_space.dim_topo + \\ # topo_vect\n", - " observation_space.n_line + \\ # rho\n", - " observation_space.n_gen + \\ # actual_dispatch\n", - " observation_space.dim_topo ** 2 # connectivity_matrix\n", - " \n", - " ob_sp = observation_space\n", - " # lowest value the attribute can take (see doc for more information)\n", - " low = np.concatenate((np.full(shape=(ob_sp.n_gen,), fill_value=0., dtype=dt_float), # gen_p\n", - " np.full(shape=(ob_sp.n_load,), fill_value=-np.inf, dtype=dt_float), # load_p\n", - " np.full(shape=(ob_sp.dim_topo,), 
fill_value=-1., dtype=dt_float), # topo_vect\n", - " np.full(shape=(ob_sp.n_line,), fill_value=0., dtype=dt_float), # rho\n", - " np.full(shape=(ob_sp.n_line,), fill_value=-ob_sp.gen_pmax, dtype=dt_float), # actual_dispatch\n", - " np.full(shape=(ob_sp.dim_topo**2,), fill_value=0., dtype=dt_float), # connectivity_matrix\n", - " ))\n", - " \n", - " # highest value the attribute can take\n", - " high = np.concatenate((np.full(shape=(ob_sp.n_gen,), fill_value=np.inf, dtype=dt_float), # gen_p\n", - " np.full(shape=(ob_sp.n_load,), fill_value=np.inf, dtype=dt_float), # load_p\n", - " np.full(shape=(ob_sp.dim_topo,), fill_value=2., dtype=dt_float), # topo_vect\n", - " np.full(shape=(ob_sp.n_line,), fill_value=np.inf, dtype=dt_float), # rho\n", - " np.full(shape=(ob_sp.n_line,), fill_value=ob_sp.gen_pmax, dtype=dt_float), # actual_dispatch\n", - " np.full(shape=(ob_sp.dim_topo**2,), fill_value=1., dtype=dt_float), # connectivity_matrix\n", - " ))\n", - " Box.__init__(self, low=low, high=high, shape=shape)\n", - " \n", - " def to_gym(self, observation):\n", - " res = np.concatenate((obs.gen_p / obs.gen_pmax,\n", - " obs.prod_p / glop_obs.load_p,\n", - " obs.topo_vect.astype(float),\n", - " obs.rho,\n", - " obs.actual_dispatch / env_glop.gen_pmax,\n", - " obs.connectivity_matrix().flatten()\n", - " ))\n", - " return res\n", - "```\n", - "\n", - "So if you want more customization, but making less generic code (the `BoxGymObsSpace` works for all the attribute of the observation) you can customize it by adapting the snippet above or read the documentation here (TODO).\n", - "\n", - "Only the \"to_gym\" function, and this exact signature is important in this case. It should take an observation in a grid2op format and return this same observation compatible with the gym Box (so a numpy array with the right shape and in the right range)\n", - " \n", - "\n", - "### Action space\n", - "\n", - "Converting the grid2op actions in something that is not a Tuple, nor a Dict. 
The main restriction in these frameworks is that they do not allow for easy integration of environment where both discrete actions and continuous actions are possible.\n", - "\n", - "\n", - "#### Using a BoxGymActSpace\n", - "\n", - "We can use the same kind of method explained above with the use of the class `BoxGymActSpace`. In this case, you need to provide a way to convert a numpy array (an element of a gym Box) into a grid2op action.\n", - "\n", - "**NB** This method is particularly suited if you want to focus on CONTINUOUS part of the action space, for example redispatching, curtailment or action on storage unit.\n", - "\n", - "Though we made it possible to also use discrete action, we do not recommend to use it. Prefer using the `MultiDiscreteActSpace` for such purpose." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from grid2op.gym_compat import BoxGymActSpace\n", - "scaler_gen = env_sb.init_env.gen_max_ramp_up + env_sb.init_env.gen_max_ramp_down\n", - "scaler_gen = scaler_gen[env_sb.init_env.gen_redispatchable]\n", - "env_sb.action_space = BoxGymActSpace(env_sb.init_env.action_space,\n", - " attr_to_keep=[\"redispatch\"],\n", - " multiply={\"redispatch\": scaler_gen},\n", - " )\n", - "obs_gym, info = env_sb.reset()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "**NB**: the above code is equivalent to something like:\n", - "\n", - "```python\n", - "from gym.spaces import Box\n", - "class BoxGymActSpace(Box):\n", - " def __init__(self, action_space)\n", - " shape = observation_space.n_gen # redispatch\n", - " \n", - " ob_sp = observation_space\n", - " # lowest value the attribute can take (see doc for more information)\n", - " low = np.full(shape=(ob_sp.n_gen,), fill_value=-1., dtype=dt_float)\n", - " \n", - " # highest value the attribute can take\n", - " high = np.full(shape=(ob_sp.n_gen,), fill_value=1., dtype=dt_float)\n", - " \n", - " Box.__init__(self, 
low=low, high=high, shape=shape)\n", - " \n", - " self.action_space = action_space\n", - " \n", - " def from_gym(self, gym_observation):\n", - " res = self.action_space()\n", - " res.redispatch = gym_observation * scale_gen\n", - " return res\n", - "```\n", - "\n", - "So if you want more customization, but making less generic code (the `BoxGymActSpace` works for all the attribute of the action) you can customize it by adapting the snippet above or read the documentation here (TODO). The only important method you need to code is the \"from_gym\" one that should take into account an action as sampled by the gym Box and return a grid2op action.\n", - "\n", - "\n", - "#### Using a MultiDiscreteActSpace\n", - "\n", - "We can use the same kind of method explained above with the use of the class `BoxGymActSpace`, but which is more suited to the discrete type of actions.\n", - "\n", - "In this case, you need to provide a way to convert a numpy array of integer (an element of a gym MultiDiscrete) into a grid2op action.\n", - "\n", - "**NB** This method is particularly suited if you want to focus on DISCRETE part of the action space, for example set_bus or change_line_status." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from grid2op.gym_compat import MultiDiscreteActSpace\n", - "reencoded_act_space = MultiDiscreteActSpace(env_sb.init_env.action_space,\n", - " attr_to_keep=[\"set_line_status\", \"set_bus\", \"redispatch\"])\n", - "env_sb.action_space = reencoded_act_space\n", - "obs_gym, info = env_sb.reset()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Wrapping all up and starting the training\n", - "\n", - "First, let's make sure our environment is compatible with stable baselines, thanks to their helper function.\n", - "\n", - "This means that " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from stable_baselines3.common.env_checker import check_env\n", - "check_env(env_sb)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "So as we see, the environment seems to be compatible with stable baselines. Now we can start the training." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from stable_baselines3 import PPO\n", - "model = PPO(\"MlpPolicy\", env_sb, verbose=1)\n", - "if nb_step_train:\n", - " model.learn(total_timesteps=nb_step_train)\n", - " # model.save(\"ppo_stable_baselines3\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Again, the goal of this section was not to demonstrate how to train a state of the art algorithm, but rather to demonstrate how to use grid2op with the stable baselines repository.\n", - "\n", - "Most importantly, the neural networks there are not customized for the environment, default parameters are used. This is unlikely to work at all !\n", - "\n", - "For more information and to use tips and tricks to get started with RL agents, the devs of \"stable baselines\" have done a really nice job. 
You can have some tips for training RL agents here\n", - "https://stable-baselines3.readthedocs.io/en/master/guide/rl_tips.html\n", - "and consult any of the resources listed there https://stable-baselines3.readthedocs.io/en/master/guide/rl.html\n", - "\n", - "\n", - " For a better \"usecase\" of the PPO agent using stable-baselines3 we strongly encourage you to check out the \"PPO_SB3\" agent of l2rpn_baselines package. \n", - "\n", "## 3) Tf Agents\n", "Lastly, the RL frameworks we will use is tf agents.\n", "\n", @@ -1316,7 +934,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.10.13" + "version": "3.8.10" } }, "nbformat": 4, diff --git a/getting_started/11_ray_integration.ipynb b/getting_started/11_ray_integration.ipynb index 9a20658aa..cac674aeb 100644 --- a/getting_started/11_ray_integration.ipynb +++ b/getting_started/11_ray_integration.ipynb @@ -19,22 +19,72 @@ "\n", "See also https://docs.ray.io/en/latest/rllib/package_ref/doc/ray.rllib.algorithms.algorithm_config.AlgorithmConfig.html for other details\n", "\n", - "This notebook is tested with grid2op 1.10 and ray 2.23 on an ubuntu 20.04 machine.\n", - "\n", + "This notebook is tested with grid2op 1.10.2 and ray 2.24.0 (python3.10) on an ubuntu 20.04 machine.\n", + "\n", + " We found that ray is highly \"unstable\". Documentation is not really on par with their developments rythm. Basically, this notebook works given the exact python version and ray version. If you change it then you might need to modify the calls to ray. \n", + "\n", + "It is organised as followed:\n", + "\n", + "- [0 Some tips to get started](#0-some-tips-to-get-started) : is a reminder on what you can do to make things work. Indeed, this notebook explains \"how to use grid2op with stable baselines\" but not \"how to create a working agent able to operate a real powergrid in real time with stable baselines\". 
We wish we could explain the later...\n", + "- [1 Create the \"Grid2opEnvWrapper\" class](#1-create-the-grid2openvwraper-class) : explain how to create the main grid2op env class that you can use a \"gymnasium\" environment. \n", + "- [2 Create an environment, and train a first policy](#2-create-an-environment-and-train-a-first-policy): show how to create an environment from the class above (is pretty easy)\n", + "- [3 Evaluate the trained agent ](#3-evaluate-the-trained-agent): show how to evaluate the trained \"agent\"\n", + "- [4 Some customizations](#4-some-customizations): explain how to perform some customization of your agent / environment / policy\n", + "## 0 Some tips to get started\n", + "\n", + " It is unlikely that \"simply\" using a RL algorithm on a grid2op environment will lead to good results for the vast majority of environments.\n", + "\n", + "To make RL algorithms work with more or less sucess you might want to:\n", + "\n", + " 1) ajust the observation space: in particular selecting the right information for your agent. Too much information\n", + " and the size of the observation space will blow up and your agent will not learn anything. Not enough\n", + " information and your agent will not be able to capture anything.\n", + " \n", + " 2) customize the action space: dealing with both discrete and continuous values is often a challenge. So maybe you want to focus on only one type of action. And in all cases, try to still reduce the amount of actions your\n", + " agent \n", + " can perform. Indeed, for \"larger\" grids (118 substations, as a reference the french grid counts more than 6.000\n", + " such substations...) and by limiting 2 busbars per substation (as a reference, for some subsations, you have more\n", + " than 12 such \"busbars\") your agent will have the opportunity to choose between more than 60.000 different discrete\n", + " actions each steps. 
This is way too large for current RL algorithm as far as we know (and proposed environment are\n", + " small in comparison to real one)\n", + " \n", + " 3) customize the reward: the default reward might not work great for you. Ultimately, what TSO's or ISO's want is\n", + " to operate the grid safely, as long as possible with a cost as low as possible. This is of course really hard to\n", + " catch everything in one single reward signal. Customizing the reward is also really important because the \"do\n", + " nothing\" policy often leads to really good results (much better than random actions) which makes exploration \n", + " different actions...). So you kind of want to incentivize your agent to perform some actions at some point.\n", + " \n", + " 4) use fast simulator: even if you target an industrial application with industry grade simulators, we still would\n", + " advise you to use (at early stage of training at least) fast simulator for the vast majority of the training\n", + " process and then maybe to fine tune on better one.\n", + " \n", + " 5) combine RL with some heuristics: it's super easy to implement things like \"if there is no issue, then do\n", + " nothing\". This can be quite time consuming to learn though. Don't hesitate to check out the \"l2rpn-baselines\"\n", + " repository for already \"kind of working\" heuristics\n", + " \n", + "And finally don't hesitate to check solution proposed by winners of past l2rpn competitions in l2rpn-baselines.\n", + "\n", + "You can also ask question on our discord or on our github." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ "\n", - "## 1 Create the \"Grid2opEnv\" class\n", + "## 1 Create the \"Grid2opEnvWrapper\" class\n", "\n", - "In the next cell, we define a custom environment (that will internally use the `GymEnv` grid2op class) that is needed for ray / rllib.\n", + "In the next cell, we define a custom environment (that will internally use the `GymEnv` grid2op class). 
It is not strictly needed\n", "\n", "Indeed, in order to work with ray / rllib you need to define a custom wrapper on top of the GymEnv wrapper. You then have:\n", "\n", "- self._g2op_env which is the default grid2op environment, receiving grid2op Action and producing grid2op Observation.\n", "- self._gym_env which is a the grid2op defined `gymnasium Environment` that cannot be directly used with ray / rllib\n", - "- `Grid2opEnv` which is a the wrapper on top of `self._gym_env` to make it usable with ray / rllib.\n", + "- `Grid2opEnvWrapper` which is the wrapper on top of `self._gym_env` to make it usable with ray / rllib.\n", - "Ray / rllib expects the gymnasium environment to inherit from `gymnasium.Env` and to be initialized with a given configuration. This is why you need to create the `Grid2opEnv` wrapper on top of `GymEnv`.\n", + "Ray / rllib expects the gymnasium environment to inherit from `gymnasium.Env` and to be initialized with a given configuration. This is why you need to create the `Grid2opEnvWrapper` wrapper on top of `GymEnv`.\n", "\n", - "In the initialization of `Grid2opEnv`, the `env_config` variable is a dictionary that can take as key-word arguments:\n", + "In the initialization of `Grid2opEnvWrapper`, the `env_config` variable is a dictionary that can take as key-word arguments:\n", "\n", "- `backend_cls` : what is the class of the backend. If not provided, it will use `LightSimBackend` from the `lightsim2grid` package\n", "- `backend_options`: what options will be used to create the backend for your environment. 
Your backend will be created by calling\n", @@ -62,19 +112,21 @@ "source": [ "from gymnasium import Env\n", "from gymnasium.spaces import Discrete, MultiDiscrete, Box\n", + "import json\n", "\n", "import ray\n", "from ray.rllib.algorithms.ppo import PPOConfig\n", "from ray.rllib.algorithms import ppo\n", "\n", "from typing import Dict, Literal, Any\n", + "import copy\n", "\n", "import grid2op\n", "from grid2op.gym_compat import GymEnv, BoxGymObsSpace, DiscreteActSpace, BoxGymActSpace, MultiDiscreteActSpace\n", "from lightsim2grid import LightSimBackend\n", "\n", "\n", - "class Grid2opEnv(Env):\n", + "class Grid2opEnvWrapper(Env):\n", " def __init__(self,\n", " env_config: Dict[Literal[\"backend_cls\",\n", " \"backend_options\",\n", @@ -83,7 +135,7 @@ " \"obs_attr_to_keep\",\n", " \"act_type\",\n", " \"act_attr_to_keep\"],\n", - " Any]):\n", + " Any]= None):\n", " super().__init__()\n", " if env_config is None:\n", " env_config = {}\n", @@ -161,9 +213,13 @@ " else:\n", " raise NotImplementedError(f\"action type '{act_type}' is not currently supported.\")\n", " \n", - " \n", - " def reset(self, seed, options):\n", + " def reset(self, seed=None, options=None):\n", " # use default _gym_env (from grid2op.gym_compat module)\n", + " # NB: here you can also specify \"default options\" when you reset, for example:\n", + " # - limiting the duration of the episode \"max step\"\n", + " # - starting at different steps \"init ts\"\n", + " # - study difficult scenario \"time serie id\"\n", + " # - specify an initial state of your grid \"init state\"\n", " return self._gym_env.reset(seed=seed, options=options)\n", " \n", " def step(self, action):\n", @@ -176,23 +232,23 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Now we init ray, because we need to." 
+ "## 2 Create an environment, and train a first policy" ] }, { - "cell_type": "code", - "execution_count": null, + "cell_type": "markdown", "metadata": {}, - "outputs": [], "source": [ - "ray.init()" + "Now we init ray, because we need to." ] }, { - "cell_type": "markdown", + "cell_type": "code", + "execution_count": null, "metadata": {}, + "outputs": [], "source": [ - "## 2 Make a default environment, and train a PPO agent for one iteration" + "ray.init()" ] }, { @@ -206,22 +262,23 @@ "\n", "# Construct a generic config object, specifying values within different\n", "# sub-categories, e.g. \"training\".\n", + "env_config = {}\n", "config = (PPOConfig().training(gamma=0.9, lr=0.01)\n", - " .environment(env=Grid2opEnv, env_config={})\n", + " .environment(env=Grid2opEnvWrapper, env_config=env_config)\n", " .resources(num_gpus=0)\n", " .env_runners(num_env_runners=0)\n", " .framework(\"tf2\")\n", " )\n", "\n", "# A config object can be used to construct the respective Algorithm.\n", - "rllib_algo = config.build()\n" + "rllib_algo = config.build()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "Now we train it for one training iteration (might call `env.reset()` and `env.step()` multiple times)" + "Now we train it for one training iteration (might call `env.reset()` and `env.step()` multiple times, see ray's documentation for a better understanding of what happens here and don't hesitate to open an issue or a PR to explain it and we'll add it here, thanks)" ] }, { @@ -239,7 +296,189 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## 3 Train a PPO agent using 2 \"runners\" to make the rollouts" + "## 3 Evaluate the trained agent\n", + "\n", + "This notebook is a simple quick introduction for stable baselines only. 
So we don't really recall everything that has been said previously.\n", + "\n", + "Please consult the section `0) Recommended initial steps` of the notebook [11_IntegrationWithExistingRLFrameworks](./11_IntegrationWithExistingRLFrameworks.ipynb) for more information.\n", + "\n", + "**TL;DR** grid2op offers the possibility to test your agent on scenarios / episodes different from the ones it has been trained on. We greatly encourage you to use this functionality.\n", + "\n", + "There are two main ways to evaluate your agent:\n", + "\n", + "- you stay in the \"gymnasium\" world (see [here](#31-staying-in-the-gymnasium-ecosystem) ) and you evaluate your policy directly just like you would any other gymnasium compatible environment. Simple, easy but without support for some grid2op features\n", + "- you \"get back\" to the \"grid2op\" world (detailed [here](#32-using-the-grid2op-ecosystem)) by \"converting\" your NN policy into something that is able to output grid2op like action. This introduces yet again a \"wrapper\" but you can benefit from all grid2op features, such as the `Runner` to save and inspect what your policy has done.\n", + "\n", + " We show here just a simple example to \"get easily started\". For much better working agents, you can have a look at l2rpn-baselines code. There you have classes that map the environment, the agents etc. to grid2op directly (you don't have to copy paste any wrapper). \n", + "\n", + "\n", + "\n", + "### 3.1 staying in the gymnasium ecosystem\n", + "\n", + "You can do pretty much what you want, but you have to do it yourself, or use any of the \"Wrappers\" available in gymnasium https://gymnasium.farama.org/main/api/wrappers/ (*eg* https://gymnasium.farama.org/main/api/wrappers/misc_wrappers/#gymnasium.wrappers.RecordEpisodeStatistics) or in your RL framework.\n", + "\n", + "For the sake of simplicity, we show how to do things \"manually\" even though we do not recommend to do it like that." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "nb_episode_test = 2\n", + "seeds_test_env = (0, 1) # same size as nb_episode_test\n", + "seeds_test_agent = (3, 4) # same size as nb_episode_test\n", + "ts_ep_test = (0, 1) # same size as nb_episode_test\n", + "gym_env = Grid2opEnvWrapper(env_config)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "ep_infos = {} # information that will be saved\n", + "\n", + "\n", + "for ep_test_num in range(nb_episode_test):\n", + " init_obs, init_infos = gym_env.reset(seed=seeds_test_env[ep_test_num],\n", + " options={\"time serie id\": ts_ep_test[ep_test_num]})\n", + " # TODO seed the agent, I did not found in ray doc how to do it\n", + " done = False\n", + " cum_reward = 0\n", + " step_survived = 0\n", + " obs = init_obs\n", + " while not done:\n", + " act = rllib_algo.compute_single_action(obs, explore=False)\n", + " obs, reward, terminated, truncated, info = gym_env.step(act)\n", + " step_survived += 1\n", + " cum_reward += float(reward)\n", + " done = terminated or truncated\n", + " ep_infos[ep_test_num] = {\"time serie id\": ts_ep_test[ep_test_num],\n", + " \"time serie folder\": gym_env._gym_env.init_env.chronics_handler.get_id(),\n", + " \"env seed\": seeds_test_env[ep_test_num],\n", + " \"agent seed\": seeds_test_agent[ep_test_num],\n", + " \"steps survived\": step_survived,\n", + " \"total steps\": int(gym_env._gym_env.init_env.max_episode_duration()),\n", + " \"cum reward\": cum_reward}" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# \"prettyprint\" the dictionnary above\n", + "\n", + "print(json.dumps(ep_infos, indent=4))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "As you might have seen, it's not easy this way to retrieve some useful information about the grid2op environment if these 
informations are not passed to the policy.\n", + "\n", + "For example, we need to call `gym_env._gym_env.init_env` to access the underlying grid2op environment... You have to convert some things from int32 or float32 to float or int otherwise json complains, you have to control yourself the seeds to have reproducible results etc.\n", + "\n", + "It's a quick way to have something working but it might be perfected." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 3.2 using the grid2op ecosystem\n", + "\n", + "This second method brings it closer to grid2op ecosystem, you will be able to use it with the grid2op `Runner`, save the results and read it back with other tools such as grid2viz and do the evaluation in parallel without too much trouble (and with high reproducibility).\n", + "\n", + "With this method, you build a grid2op agent and this agent can then be used like every other grid2op agent. For example you can compare it with heuristic agents, agent based on optimization etc.\n", + "\n", + "This way of doing things also allows you to customize when the neural network policy is used. For example, you might choose to use it only when the grid is \"unsafe\" (and if the grid is safe you use an \"expert\" rule).\n", + "\n", + "This is more flexible than the previous one." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from grid2op.Agent import BaseAgent\n", + "from grid2op.Runner import Runner\n", + "\n", + "class Grid2opAgentWrapper(BaseAgent):\n", + " def __init__(self,\n", + " gym_env: Grid2opEnvWrapper,\n", + " trained_agent):\n", + " self.gym_env = gym_env\n", + " BaseAgent.__init__(self, gym_env._gym_env.init_env.action_space)\n", + " self.trained_agent = trained_agent\n", + " \n", + " def act(self, obs, reward, done):\n", + " # you can customize it here to call the NN policy `trained_agent`\n", + " # only in some cases, depending on the observation for example\n", + " gym_obs = self.gym_env._gym_env.observation_space.to_gym(obs)\n", + " gym_act = self.trained_agent.compute_single_action(gym_obs, explore=False)\n", + " grid2op_act = self.gym_env._gym_env.action_space.from_gym(gym_act)\n", + " return grid2op_act\n", + " \n", + " def seed(self, seed):\n", + " # implement the seed function\n", + " # TODO\n", + " return" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "my_agent = Grid2opAgentWrapper(gym_env, rllib_algo)\n", + "runner = Runner(**gym_env._g2op_env.get_params_for_runner(),\n", + " agentClass=None,\n", + " agentInstance=my_agent)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "res = runner.run(nb_episode=nb_episode_test,\n", + " env_seeds=seeds_test_env,\n", + " agent_seeds=seeds_test_agent,\n", + " episode_id=ts_ep_test,\n", + " add_detailed_output=True\n", + " )" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "res" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 4 some customizations\n", + "\n", + "### 4.1 Train a PPO agent using 2 \"runners\" to make the rollouts\n", + "\n", + "In this second example, we explain briefly 
how to train the model using 2 \"processes\". This is, the agent will interact with 2 agents at the same time during the \"rollout\" phases.\n", + "\n", + "But everything related to the training of the agent is still done on the main process (and in this case not using a GPU but only a CPU)." ] }, { @@ -250,9 +489,9 @@ "source": [ "# see https://docs.ray.io/en/latest/rllib/package_ref/doc/ray.rllib.algorithms.algorithm_config.AlgorithmConfig.html\n", "\n", - "# use multiple use multiple runners\n", + "# use multiple runners\n", "config2 = (PPOConfig().training(gamma=0.9, lr=0.01)\n", - " .environment(env=Grid2opEnv, env_config={})\n", + " .environment(env=Grid2opEnvWrapper, env_config={})\n", " .resources(num_gpus=0)\n", " .env_runners(num_env_runners=2, num_envs_per_env_runner=1, num_cpus_per_env_runner=1)\n", " .framework(\"tf2\")\n", @@ -282,9 +521,9 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## 4 Use non default parameters to make the l2rpn environment\n", + "### 4.2 Use non default parameters to make the grid2op environment\n", "\n", - "In this first example, we will train a policy using the \"box\" action space." 
+ "In this third example, we will train a policy using the \"box\" action space, and on another environment (`l2rpn_idf_2023` instead of `l2rpn_case14_sandbox`)" ] }, { @@ -296,12 +535,12 @@ "# see https://docs.ray.io/en/latest/rllib/package_ref/doc/ray.rllib.algorithms.algorithm_config.AlgorithmConfig.html\n", "\n", "# Use a \"Box\" action space (mainly to use redispatching, curtailment and storage units)\n", - "env_config = {\"env_name\": \"l2rpn_idf_2023\",\n", - " \"env_is_test\": True,\n", - " \"act_type\": \"box\",\n", - " }\n", + "env_config3 = {\"env_name\": \"l2rpn_idf_2023\",\n", + " \"env_is_test\": True,\n", + " \"act_type\": \"box\",\n", + " }\n", "config3 = (PPOConfig().training(gamma=0.9, lr=0.01)\n", - " .environment(env=Grid2opEnv, env_config=env_config)\n", + " .environment(env=Grid2opEnvWrapper, env_config=env_config3)\n", " .resources(num_gpus=0)\n", " .env_runners(num_env_runners=2, num_envs_per_env_runner=1, num_cpus_per_env_runner=1)\n", " .framework(\"tf2\")\n", @@ -348,7 +587,7 @@ " \"act_type\": \"multi_discrete\",\n", " }\n", "config4 = (PPOConfig().training(gamma=0.9, lr=0.01)\n", - " .environment(env=Grid2opEnv, env_config=env_config4)\n", + " .environment(env=Grid2opEnvWrapper, env_config=env_config4)\n", " .resources(num_gpus=0)\n", " .env_runners(num_env_runners=2, num_envs_per_env_runner=1, num_cpus_per_env_runner=1)\n", " .framework(\"tf2\")\n", @@ -378,7 +617,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## 5 Customize the policy (number of layers, size of layers etc.)\n", + "### 4.3 Customize the policy (number of layers, size of layers etc.)\n", "\n", "This notebook does not aim at covering all possibilities offered by ray / rllib. 
For that you need to refer to the ray / rllib documentation.\n", "\n", @@ -395,7 +634,7 @@ "\n", "# Use a \"Box\" action space (mainly to use redispatching, curtailment and storage units)\n", "config5 = (PPOConfig().training(gamma=0.9, lr=0.01)\n", - " .environment(env=Grid2opEnv, env_config={})\n", + " .environment(env=Grid2opEnvWrapper, env_config={})\n", " .resources(num_gpus=0)\n", " .env_runners(num_env_runners=2, num_envs_per_env_runner=1, num_cpus_per_env_runner=1)\n", " .framework(\"tf2\")\n", diff --git a/getting_started/11_stable_baselines3_integration.ipynb b/getting_started/11_stable_baselines3_integration.ipynb new file mode 100644 index 000000000..68576bc80 --- /dev/null +++ b/getting_started/11_stable_baselines3_integration.ipynb @@ -0,0 +1,638 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "90b9341f", + "metadata": {}, + "source": [ + "# Grid2Op integration with stable baselines3 framework\n", + "\n", + "Try me out interactively with: [![Binder](./img/badge_logo.svg)](https://mybinder.org/v2/gh/rte-france/Grid2Op/master)\n", + "\n", + "\n", + "**objectives** This notebooks briefly explains how to use grid2op with stable baselines 3 RL framework. Make sure to read the previous notebook [11_IntegrationWithExistingRLFrameworks](./11_IntegrationWithExistingRLFrameworks.ipynb) for a deeper dive into what happens. We only show the working solution here.\n", + "\n", + " This explains the ideas and shows a \"self contained\" somewhat minimal example of use of stable baselines 3 framework with grid2op. It is not meant to be fully generic, code might need to be adjusted. 
\n", + "\n", + "This notebook is more an \"example of what works\" rather than a deep dive tutorial.\n", + "\n", + "See stable-baselines3.readthedocs.io/ for a more detailed information.\n", + "\n", + "This notebook is tested with grid2op 1.10 and stable baselines 2.3.2 on an ubuntu 20.04 machine.\n", + "\n", + "It is organised as followed:\n", + "\n", + "- [0 Some tips to get started](#0-some-tips-to-get-started) : is a reminder on what you can do to make things work. Indeed, this notebook explains \"how to use grid2op with stable baselines\" but not \"how to create a working agent able to operate a real powergrid in real time with stable baselines\". We wish we could explain the later...\n", + "- [1 Create the \"Grid2opEnvWrapper\" class](#1-create-the-grid2openvwraper-class) : explain how to create the main grid2op env class that you can use a \"gymnasium\" environment. \n", + "- [2 Create an environment, and train a first policy](#2-create-an-environment-and-train-a-first-policy): show how to create an environment from the class above (is pretty easy)\n", + "- [3 Evaluate the trained agent ](#3-evaluate-the-trained-agent): show how to evaluate the trained \"agent\"\n", + "- [4 Some customizations](#4-some-customizations): explain how to perform some customization of your agent / environment / policy\n", + "\n", + "## 0 Some tips to get started\n", + "\n", + " It is unlikely that \"simply\" using a RL algorithm on a grid2op environment will lead to good results for the vast majority of environments.\n", + "\n", + "To make RL algorithms work with more or less sucess you might want to:\n", + "\n", + " 1) ajust the observation space: in particular selecting the right information for your agent. Too much information\n", + " and the size of the observation space will blow up and your agent will not learn anything. 
Not enough\n", + " information and your agent will not be able to capture anything.\n", + " \n", + " 2) customize the action space: dealing with both discrete and continuous values is often a challenge. So maybe you want to focus on only one type of action. And in all cases, try to still reduce the amount of actions your\n", + " agent \n", + " can perform. Indeed, for \"larger\" grids (118 substations, as a reference the french grid counts more than 6.000\n", + " such substations...) and by limiting 2 busbars per substation (as a reference, for some subsations, you have more\n", + " than 12 such \"busbars\") your agent will have the opportunity to choose between more than 60.000 different discrete\n", + " actions each steps. This is way too large for current RL algorithm as far as we know (and proposed environment are\n", + " small in comparison to real one)\n", + " \n", + " 3) customize the reward: the default reward might not work great for you. Ultimately, what TSO's or ISO's want is\n", + " to operate the grid safely, as long as possible with a cost as low as possible. This is of course really hard to\n", + " catch everything in one single reward signal. Customizing the reward is also really important because the \"do\n", + " nothing\" policy often leads to really good results (much better than random actions) which makes exploration \n", + " different actions...). So you kind of want to incentivize your agent to perform some actions at some point.\n", + " \n", + " 4) use fast simulator: even if you target an industrial application with industry grade simulators, we still would\n", + " advise you to use (at early stage of training at least) fast simulator for the vast majority of the training\n", + " process and then maybe to fine tune on better one.\n", + " \n", + " 5) combine RL with some heuristics: it's super easy to implement things like \"if there is no issue, then do\n", + " nothing\". This can be quite time consuming to learn though. 
Don't hesitate to check out the \"l2rpn-baselines\"\n", + " repository for already \"kind of working\" heuristics\n", + " \n", + "And finally don't hesitate to check solution proposed by winners of past l2rpn competitions in l2rpn-baselines.\n", + "\n", + "You can also ask question on our discord or on our github.\n", + "\n", + "\n", + "## 1 Create the \"Grid2opEnvWrapper\" class\n", + "\n", + "### 1.1 Easy but not easily customizable" + ] + }, + { + "cell_type": "markdown", + "id": "ae59e1f5", + "metadata": {}, + "source": [ + "### 1.2 Similar to ray / rllib with same type of configuration\n", + "\n", + "In the next cell, we define a custom environment (that will internally use the `GymEnv` grid2op class) that is needed for ray / rllib.\n", + "\n", + "Indeed, in order to work with ray / rllib you need to define a custom wrapper on top of the GymEnv wrapper. You then have:\n", + "\n", + "- self._g2op_env which is the default grid2op environment, receiving grid2op Action and producing grid2op Observation.\n", + "- self._gym_env which is a the grid2op defined `gymnasium Environment` that cannot be directly used with ray / rllib\n", + "- `Grid2opEnv` which is a the wrapper on top of `self._gym_env` to make it usable with ray / rllib.\n", + "\n", + "Ray / rllib expects the gymnasium environment to inherit from `gymnasium.Env` and to be initialized with a given configuration. This is why you need to create the `Grid2opEnv` wrapper on top of `GymEnv`.\n", + "\n", + "In the initialization of `Grid2opEnv`, the `env_config` variable is a dictionary that can take as key-word arguments:\n", + "\n", + "- `backend_cls` : what is the class of the backend. If not provided, it will use `LightSimBackend` from the `lightsim2grid` package\n", + "- `backend_options`: what options will be used to create the backend for your environment. 
Your backend will be created by calling\n", + " `backend_cls(**backend_options)`, for example if you want to build `LightSimBackend(detailed_info_for_cascading_failure=False)` you can pass `{\"backend_cls\": LightSimBackend, \"backend_options\": {\"detailed_info_for_cascading_failure\": False}}`\n", + "- `env_name` : name of the grid2op environment you want to use, by default it uses `\"l2rpn_case14_sandbox\"`\n", + "- `env_is_test` : whether to add `test=True` when creating the grid2op environment (if `env_is_test` is True it will add `test=True` when calling `grid2op.make(..., test=True)`) otherwise it uses `test=False`\n", + "- `obs_attr_to_keep` : in this wrapper we only allow your agent to see a Box as an observation. This parameter allows you to control which attributes of the grid2op observation will be present in the agent observation space. By default it's `[\"rho\", \"p_or\", \"gen_p\", \"load_p\"]` which is \"kind of random\" and is probably not suited for every agent.\n", + "- `act_type` : controls the type of actions your agent will be able to perform. Already coded in this notebook are:\n", + " - `\"discrete\"` to use a `Discrete` action space\n", + " - `\"box\"` to use a `Box` action space\n", + " - `\"multi_discrete\"` to use a `MultiDiscrete` action space\n", + "- `act_attr_to_keep` : that allows you to customize the action space. If not provided, it defaults to:\n", + " - `[\"set_line_status_simple\", \"set_bus\"]` if `act_type` is `\"discrete\"` \n", + " - `[\"redispatch\", \"set_storage\", \"curtail\"]` if `act_type` is `\"box\"` \n", + " - `[\"one_line_set\", \"one_sub_set\"]` if `act_type` is `\"multi_discrete\"`\n", + "\n", + "If you want to add more customization, for example the reward function, the parameters of the environment etc. etc. feel free to get inspired by this code and extend it. Any PR on this regard is more than welcome." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "55e043a9", + "metadata": {}, + "outputs": [], + "source": [ + "import copy\n", + "from typing import Dict, Literal, Any\n", + "import json\n", + "\n", + "from gymnasium import Env\n", + "from gymnasium.spaces import Discrete, MultiDiscrete, Box\n", + "\n", + "import grid2op\n", + "from grid2op.gym_compat import GymEnv, BoxGymObsSpace, DiscreteActSpace, BoxGymActSpace, MultiDiscreteActSpace\n", + "from lightsim2grid import LightSimBackend\n", + "\n", + "\n", + "class Grid2opEnvWrapper(Env):\n", + " def __init__(self,\n", + " env_config: Dict[Literal[\"backend_cls\",\n", + " \"backend_options\",\n", + " \"env_name\",\n", + " \"env_is_test\",\n", + " \"obs_attr_to_keep\",\n", + " \"act_type\",\n", + " \"act_attr_to_keep\"],\n", + " Any] = None):\n", + " super().__init__()\n", + " if env_config is None:\n", + " env_config = {}\n", + "\n", + " # handle the backend\n", + " backend_cls = LightSimBackend\n", + " if \"backend_cls\" in env_config:\n", + " backend_cls = env_config[\"backend_cls\"]\n", + " backend_options = {}\n", + " if \"backend_options\" in env_config:\n", + " backend_options = env_config[\"backend_options\"]\n", + " backend = backend_cls(**backend_options)\n", + "\n", + " # create the grid2op environment\n", + " env_name = \"l2rpn_case14_sandbox\"\n", + " if \"env_name\" in env_config:\n", + " env_name = env_config[\"env_name\"]\n", + " if \"env_is_test\" in env_config:\n", + " is_test = bool(env_config[\"env_is_test\"])\n", + " else:\n", + " is_test = False\n", + " self._g2op_env = grid2op.make(env_name, backend=backend, test=is_test)\n", + " # NB by default this might be really slow (when the environment is reset)\n", + " # see https://grid2op.readthedocs.io/en/latest/data_pipeline.html for maybe 10x speed ups !\n", + " # TODO customize reward or action_class for example !\n", + "\n", + " # create the gym env (from grid2op)\n", + " self._gym_env = GymEnv(self._g2op_env)\n", + "\n", + 
" # customize observation space\n", + " obs_attr_to_keep = [\"rho\", \"p_or\", \"gen_p\", \"load_p\"]\n", + " if \"obs_attr_to_keep\" in env_config:\n", + " obs_attr_to_keep = copy.deepcopy(env_config[\"obs_attr_to_keep\"])\n", + " self._gym_env.observation_space.close()\n", + " self._gym_env.observation_space = BoxGymObsSpace(self._g2op_env.observation_space,\n", + " attr_to_keep=obs_attr_to_keep\n", + " )\n", + " # export observation space for the Grid2opEnv\n", + " self.observation_space = Box(shape=self._gym_env.observation_space.shape,\n", + " low=self._gym_env.observation_space.low,\n", + " high=self._gym_env.observation_space.high)\n", + "\n", + " # customize the action space\n", + " act_type = \"discrete\"\n", + " if \"act_type\" in env_config:\n", + " act_type = env_config[\"act_type\"]\n", + "\n", + " self._gym_env.action_space.close()\n", + " if act_type == \"discrete\":\n", + " # user wants a discrete action space\n", + " act_attr_to_keep = [\"set_line_status_simple\", \"set_bus\"]\n", + " if \"act_attr_to_keep\" in env_config:\n", + " act_attr_to_keep = copy.deepcopy(env_config[\"act_attr_to_keep\"])\n", + " self._gym_env.action_space = DiscreteActSpace(self._g2op_env.action_space,\n", + " attr_to_keep=act_attr_to_keep)\n", + " self.action_space = Discrete(self._gym_env.action_space.n)\n", + " elif act_type == \"box\":\n", + " # user wants continuous action space\n", + " act_attr_to_keep = [\"redispatch\", \"set_storage\", \"curtail\"]\n", + " if \"act_attr_to_keep\" in env_config:\n", + " act_attr_to_keep = copy.deepcopy(env_config[\"act_attr_to_keep\"])\n", + " self._gym_env.action_space = BoxGymActSpace(self._g2op_env.action_space,\n", + " attr_to_keep=act_attr_to_keep)\n", + " self.action_space = Box(shape=self._gym_env.action_space.shape,\n", + " low=self._gym_env.action_space.low,\n", + " high=self._gym_env.action_space.high)\n", + " elif act_type == \"multi_discrete\":\n", + " # user wants a multi-discrete action space\n", + " act_attr_to_keep = 
[\"one_line_set\", \"one_sub_set\"]\n", + " if \"act_attr_to_keep\" in env_config:\n", + " act_attr_to_keep = copy.deepcopy(env_config[\"act_attr_to_keep\"])\n", + " self._gym_env.action_space = MultiDiscreteActSpace(self._g2op_env.action_space,\n", + " attr_to_keep=act_attr_to_keep)\n", + " self.action_space = MultiDiscrete(self._gym_env.action_space.nvec)\n", + " else:\n", + " raise NotImplementedError(f\"action type '{act_type}' is not currently supported.\")\n", + " \n", + " \n", + " def reset(self, seed=None, options=None):\n", + " # use default _gym_env (from grid2op.gym_compat module)\n", + " # NB: here you can also specify \"default options\" when you reset, for example:\n", + " # - limiting the duration of the episode \"max step\"\n", + " # - starting at different steps \"init ts\"\n", + " # - study difficult scenario \"time serie id\"\n", + " # - specify an initial state of your grid \"init state\"\n", + " return self._gym_env.reset(seed=seed, options=options)\n", + " \n", + " def step(self, action):\n", + " # use default _gym_env (from grid2op.gym_compat module)\n", + " return self._gym_env.step(action)\n", + " " + ] + }, + { + "cell_type": "markdown", + "id": "a93964d8", + "metadata": {}, + "source": [ + "## 2 Create an environment, and train a first policy\n", + "\n", + "In this section we quickly show :\n", + "\n", + "- how to create the gym environment, which is an instance from `Grid2opEnvWrapper` defined above\n", + "- how to train a PPO policy using stable baselines3\n", + "\n", + "This part, for stable baselines is really small." 
 + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "38629107", + "metadata": {}, + "outputs": [], + "source": [ + "from stable_baselines3 import PPO\n", + "\n", + "gym_env = Grid2opEnvWrapper()\n", + "sb3_algo1 = PPO(\"MlpPolicy\", gym_env, verbose=0)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "89be6372", + "metadata": {}, + "outputs": [], + "source": [ + "sb3_algo1.learn(total_timesteps=1024)" + ] + }, + { + "cell_type": "markdown", + "id": "3a8f9717", + "metadata": {}, + "source": [ + "## 3 Evaluate the trained agent\n", + "\n", + "This notebook is a simple quick introduction for stable baselines only. So we don't really recall everything that has been said previously.\n", + "\n", + "Please consult the section `0) Recommended initial steps` of the notebook [11_IntegrationWithExistingRLFrameworks](./11_IntegrationWithExistingRLFrameworks.ipynb) for more information.\n", + "\n", + "**TL;DR** grid2op offers the possibility to test your agent on scenarios / episodes different from the ones it has been trained on. We greatly encourage you to use this functionality.\n", + "\n", + "There are two main ways to evaluate your agent:\n", + "\n", + "- you stay in the \"gymnasium\" world (see [here](#31-staying-in-the-gymnasium-ecosystem) ) and you evaluate your policy directly just like you would any other gymnasium compatible environment. Simple, easy but without support for some grid2op features\n", + "- you \"get back\" to the \"grid2op\" world (detailed [here](#32-using-the-grid2op-ecosystem)) by \"converting\" your NN policy into something that is able to output grid2op like action. This introduces yet again a \"wrapper\" but you can benefit from all grid2op features, such as the `Runner` to save and inspect what your policy has done.\n", + "\n", + " We show here just a simple example to \"get easily started\". For much better working agents, you can have a look at l2rpn-baselines code. 
There you have classes that maps the environment, the agents etc. to grid2op directly (you don't have to copy paste any wrapper). \n", + "\n", + "\n", + "\n", + "### 3.1 staying in the gymnasium ecosystem\n", + "\n", + "You can do pretty much what you want, but you have to do it yourself, or use any of the \"Wrappers\" available in gymnasium https://gymnasium.farama.org/main/api/wrappers/ (*eg* https://gymnasium.farama.org/main/api/wrappers/misc_wrappers/#gymnasium.wrappers.RecordEpisodeStatistics) or in your RL framework.\n", + "\n", + "For the sake of simplicity, we show how to do things \"manually\" even though we do not recommend to do it like that." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "05f5e188", + "metadata": {}, + "outputs": [], + "source": [ + "nb_episode_test = 2\n", + "seeds_test_env = (0, 1) # same size as nb_episode_test\n", + "seeds_test_agent = (3, 4) # same size as nb_episode_test\n", + "ts_ep_test = (0, 1) # same size as nb_episode_test" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "da0e7990", + "metadata": {}, + "outputs": [], + "source": [ + "ep_infos = {} # information that will be saved\n", + "\n", + "\n", + "for ep_test_num in range(nb_episode_test):\n", + " init_obs, init_infos = gym_env.reset(seed=seeds_test_env[ep_test_num],\n", + " options={\"time serie id\": ts_ep_test[ep_test_num]})\n", + " sb3_algo1.set_random_seed(seeds_test_agent[ep_test_num])\n", + " done = False\n", + " cum_reward = 0\n", + " step_survived = 0\n", + " obs = init_obs\n", + " while not done:\n", + " act, _states = sb3_algo1.predict(obs, deterministic=True)\n", + " obs, reward, terminated, truncated, info = gym_env.step(act)\n", + " step_survived += 1\n", + " cum_reward += float(reward)\n", + " done = terminated or truncated\n", + " ep_infos[ep_test_num] = {\"time serie id\": ts_ep_test[ep_test_num],\n", + " \"time serie folder\": gym_env._gym_env.init_env.chronics_handler.get_id(),\n", + " \"env seed\": 
seeds_test_env[ep_test_num],\n", + " \"agent seed\": seeds_test_agent[ep_test_num],\n", + " \"steps survived\": step_survived,\n", + " \"total steps\": int(gym_env._gym_env.init_env.max_episode_duration()),\n", + " \"cum reward\": cum_reward}" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3f248fdc", + "metadata": {}, + "outputs": [], + "source": [ + "# \"prettyprint\" the dictionnary above\n", + "\n", + "print(json.dumps(ep_infos, indent=4))" + ] + }, + { + "cell_type": "markdown", + "id": "8a32899b", + "metadata": {}, + "source": [ + "As you might have seen, it's not easy this way to retrieve some useful information about the grid2op environment if these informations are not passed to the policy.\n", + "\n", + "For example, we need to call `gym_env._gym_env.init_env` to access the underlying grid2op environment... You have to convert some things from int32 or float32 to float or int otherwise json complains, you have to control yourself the seeds to have reproducible results etc.\n", + "\n", + "It's a quick way to have something working but it might be perfected." + ] + }, + { + "cell_type": "markdown", + "id": "fde71911", + "metadata": {}, + "source": [ + "### 3.2 using the grid2op ecosystem\n", + "\n", + "This second method brings it closer to grid2op ecosystem, you will be able to use it with the grid2op `Runner`, save the results and read it back with other tools such as grid2viz and do the evaluation in parrallel without too much trouble (and with high reproducibility).\n", + "\n", + "With this method, you build a grid2op agent and this agent can then be used like every other grid2op agent. For example you can compare it with heuristic agents, agent based on optimization etc.\n", + "\n", + "This way of doing things also allows you to customize when the neural network policy is used. 
For example, you might chose to use it only when the grid is \"unsafe\" (and if the grid is safe you use an \"expert\" rules).\n", + "\n", + "This is more flexible than the previous one." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "50625005", + "metadata": {}, + "outputs": [], + "source": [ + "from grid2op.Agent import BaseAgent\n", + "from grid2op.Runner import Runner\n", + "\n", + "class Grid2opAgentWrapper(BaseAgent):\n", + " def __init__(self,\n", + " gym_env: Grid2opEnvWrapper,\n", + " trained_agent: PPO):\n", + " self.gym_env = gym_env\n", + " BaseAgent.__init__(self, gym_env._gym_env.init_env.action_space)\n", + " self.trained_agent = trained_agent\n", + " \n", + " def act(self, obs, reward, done):\n", + " # you can customize it here to call the NN policy `trained_agent`\n", + " # only in some cases, depending on the observation for example\n", + " gym_obs = self.gym_env._gym_env.observation_space.to_gym(obs)\n", + " gym_act, _states = self.trained_agent.predict(gym_obs, deterministic=True)\n", + " grid2op_act = self.gym_env._gym_env.action_space.from_gym(gym_act)\n", + " return grid2op_act\n", + " \n", + " def seed(self, seed):\n", + " # implement the seed function\n", + " if seed is None:\n", + " return\n", + " seed_int = int(seed)\n", + " if seed_int != seed:\n", + " raise RuntimeError(\"Seed must be convertible to an integer\")\n", + " self.trained_agent.set_random_seed(seed_int)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "99e84f4a", + "metadata": {}, + "outputs": [], + "source": [ + "my_agent = Grid2opAgentWrapper(gym_env, sb3_algo1)\n", + "runner = Runner(**gym_env._g2op_env.get_params_for_runner(),\n", + " agentClass=None,\n", + " agentInstance=my_agent)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "18b461cb", + "metadata": {}, + "outputs": [], + "source": [ + "res = runner.run(nb_episode=nb_episode_test,\n", + " env_seeds=seeds_test_env,\n", + " 
agent_seeds=seeds_test_agent,\n", + " episode_id=ts_ep_test,\n", + " add_detailed_output=True\n", + " )" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "fe880aac", + "metadata": {}, + "outputs": [], + "source": [ + "res" + ] + }, + { + "cell_type": "markdown", + "id": "6fce9ed9", + "metadata": {}, + "source": [ + "See the documentation or the notebook [05 StudyYourAgent](./05_StudyYourAgent.ipynb) on how to use grid2op tools to study your agent, its decisions etc." + ] + }, + { + "cell_type": "markdown", + "id": "49bf6095", + "metadata": {}, + "source": [ + "## 4 Some customizations\n", + "\n", + "### 4.1 Train a PPO agent using 4 \"runners\" to make the rollouts\n", + "\n", + "This, for now, only works on linux based computers. Hopefully this will work on windows and macos as soon as possible.\n", + "\n", + "This allows to use some \"parralellism\" during the training: your agent will interact \"at the same time\" with 4 environments allowing it to gather experience faster. But in this case, its training is always done in the \"main\" process." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f2036ac9", + "metadata": {}, + "outputs": [], + "source": [ + "from stable_baselines3.common.env_util import make_vec_env\n", + "vec_env = make_vec_env(lambda : Grid2opEnvWrapper(), n_envs=4)\n", + "sb3_algo2 = PPO(\"MlpPolicy\", vec_env, verbose=0)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1d8ac595", + "metadata": {}, + "outputs": [], + "source": [ + "sb3_algo2.learn(total_timesteps=1024)" + ] + }, + { + "cell_type": "markdown", + "id": "8fc163cd", + "metadata": {}, + "source": [ + "### 4.2 Use non default parameters to make the grid2op environment\n", + "\n", + "In this third example, we will train a policy using the \"box\" action space, and on another environment (`l2rpn_idf_2023` instead of `l2rpn_case14_sandbox`)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "13740e53", + "metadata": {}, + "outputs": [], + "source": [ + "# see https://docs.ray.io/en/latest/rllib/package_ref/doc/ray.rllib.algorithms.algorithm_config.AlgorithmConfig.html\n", + "\n", + "# Use a \"Box\" action space (mainly to use redispatching, curtailment and storage units)\n", + "env_config3 = {\"env_name\": \"l2rpn_idf_2023\",\n", + " \"env_is_test\": True,\n", + " \"act_type\": \"box\",\n", + " }\n", + "gym_env3 = Grid2opEnvWrapper(env_config3)\n", + "sb3_algo3 = PPO(\"MlpPolicy\", gym_env3, verbose=0)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "93ac61ff", + "metadata": {}, + "outputs": [], + "source": [ + "sb3_algo3.learn(total_timesteps=1024)" + ] + }, + { + "cell_type": "markdown", + "id": "00790379", + "metadata": {}, + "source": [ + "And now a policy using the \"multi discrete\" action space: " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "6cd44edb", + "metadata": {}, + "outputs": [], + "source": [ + "# see 
https://docs.ray.io/en/latest/rllib/package_ref/doc/ray.rllib.algorithms.algorithm_config.AlgorithmConfig.html\n", + "\n", + "# Use a \"Box\" action space (mainly to use redispatching, curtailment and storage units)\n", + "env_config4 = {\"env_name\": \"l2rpn_idf_2023\",\n", + " \"env_is_test\": True,\n", + " \"act_type\": \"multi_discrete\",\n", + " }\n", + "gym_env4 = Grid2opEnvWrapper(env_config4)\n", + "sb3_algo4 = PPO(\"MlpPolicy\", gym_env4, verbose=0)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "d18be5ec", + "metadata": {}, + "outputs": [], + "source": [ + "sb3_algo4.learn(total_timesteps=1024)" + ] + }, + { + "cell_type": "markdown", + "id": "7cf2dd58", + "metadata": {}, + "source": [ + "### 4.3 Customize the policy (number of layers, size of layers etc.)\n", + "\n", + "This notebook does not aim at covering all possibilities offered by ray / rllib. For that you need to refer to the ray / rllib documentation.\n", + "\n", + "We will simply show how to change the size of the neural network used as a policy." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "fa7cc345", + "metadata": {}, + "outputs": [], + "source": [ + "# see https://docs.ray.io/en/latest/rllib/package_ref/doc/ray.rllib.algorithms.algorithm_config.AlgorithmConfig.html\n", + "\n", + "gym_env5 = Grid2opEnvWrapper()\n", + "sb3_algo5 = PPO(\"MlpPolicy\",\n", + " gym_env5,\n", + " verbose=0,\n", + " policy_kwargs={\"net_arch\": [32, 32, 32]}\n", + " )" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "51d435e5", + "metadata": {}, + "outputs": [], + "source": [ + "sb3_algo5.learn(total_timesteps=1024)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.10" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/grid2op/Action/actionSpace.py b/grid2op/Action/actionSpace.py index 4ce24be68..2b55406e0 100644 --- a/grid2op/Action/actionSpace.py +++ b/grid2op/Action/actionSpace.py @@ -44,6 +44,7 @@ def __init__( gridobj, legal_action, actionClass=BaseAction, # need to be a base grid2op type (and not a type generated on the fly) + _local_dir_cls=None, ): """ INTERNAL USE ONLY @@ -71,7 +72,7 @@ def __init__( """ actionClass._add_shunt_data() actionClass._update_value_set() - SerializableActionSpace.__init__(self, gridobj, actionClass=actionClass) + SerializableActionSpace.__init__(self, gridobj, actionClass=actionClass, _local_dir_cls=_local_dir_cls) self.legal_action = legal_action def __call__( diff --git a/grid2op/Action/serializableActionSpace.py b/grid2op/Action/serializableActionSpace.py index f1c8bbee7..79f409336 100644 --- a/grid2op/Action/serializableActionSpace.py +++ b/grid2op/Action/serializableActionSpace.py @@ -54,7 +54,7 
@@ class SerializableActionSpace(SerializableSpace): '"which is not the type of action handled by this action space "' '("{}")') - def __init__(self, gridobj, actionClass=BaseAction, _init_grid=True): + def __init__(self, gridobj, actionClass=BaseAction, _init_grid=True, _local_dir_cls=None): """ INTERNAL USE ONLY @@ -74,7 +74,10 @@ def __init__(self, gridobj, actionClass=BaseAction, _init_grid=True): """ SerializableSpace.__init__( - self, gridobj=gridobj, subtype=actionClass, _init_grid=_init_grid + self, gridobj=gridobj, + subtype=actionClass, + _init_grid=_init_grid, + _local_dir_cls=_local_dir_cls ) self.actionClass = self.subtype self._template_act = self.actionClass() diff --git a/grid2op/Backend/backend.py b/grid2op/Backend/backend.py index 3e2b96d21..3e875e6ad 100644 --- a/grid2op/Backend/backend.py +++ b/grid2op/Backend/backend.py @@ -1019,12 +1019,7 @@ def _runpf_with_diverging_exception(self, is_dc : bool) -> Optional[Exception]: conv, exc_me = self.runpf(is_dc=is_dc) # run powerflow except Grid2OpException as exc_: exc_me = exc_ - # except Exception as exc_: - # exc_me = DivergingPowerflow( - # f" An unexpected error occurred during the computation of the powerflow." - # f"The error is: \n {exc_} \n. This is game over" - # ) - + if not conv and exc_me is None: exc_me = DivergingPowerflow( "GAME OVER: Powerflow has diverged during computation " @@ -2013,8 +2008,9 @@ def update_from_obs(self, '"grid2op.Observation.CompleteObservation".' ) - backend_action = self.my_bk_act_class() - act = self._complete_action_class() + cls = type(self) + backend_action = cls.my_bk_act_class() + act = cls._complete_action_class() line_status = self._aux_get_line_status_to_set(obs.line_status) # skip the action part and update directly the backend action ! 
dict_ = { @@ -2028,7 +2024,7 @@ def update_from_obs(self, }, } - if type(self).shunts_data_available and type(obs).shunts_data_available: + if cls.shunts_data_available and type(obs).shunts_data_available: if "_shunt_bus" not in type(obs).attr_list_set: raise BackendError( "Impossible to set the backend to the state given by the observation: shunts data " @@ -2045,13 +2041,13 @@ def update_from_obs(self, sh_q[~shunt_co] = np.NaN dict_["shunt"]["shunt_p"] = sh_p dict_["shunt"]["shunt_q"] = sh_q - elif type(self).shunts_data_available and not type(obs).shunts_data_available: + elif cls.shunts_data_available and not type(obs).shunts_data_available: warnings.warn("Backend supports shunt but not the observation. This behaviour is non standard.") act.update(dict_) backend_action += act self.apply_action(backend_action) - def assert_grid_correct(self) -> None: + def assert_grid_correct(self, _local_dir_cls=None) -> None: """ INTERNAL @@ -2060,9 +2056,6 @@ def assert_grid_correct(self) -> None: This is done as it should be by the Environment """ - # lazy loading - from grid2op.Action import CompleteAction - from grid2op.Action._backendAction import _BackendAction if hasattr(self, "_missing_two_busbars_support_info"): if self._missing_two_busbars_support_info: @@ -2086,23 +2079,21 @@ def assert_grid_correct(self) -> None: warnings.warn("Your backend is missing the `_missing_two_busbars_support_info` " "attribute. This is known issue in lightims2grid <= 0.7.5. Please " "upgrade your backend. 
This will raise an error in the future.") - + orig_type = type(self) - if orig_type.my_bk_act_class is None: + if orig_type.my_bk_act_class is None and orig_type._INIT_GRID_CLS is None: + # NB the second part of the "if": `orig_type._INIT_GRID_CLS is None` + # has been added in grid2Op 1.10.3 to handle multiprocessing correctly: + # classes passed in multi processing should not be initialized a second time + # class is already initialized # and set up the proper class and everything self._init_class_attr() - - # hack due to changing class of imported module in the module itself + future_cls = orig_type.init_grid( - type(self), force_module=type(self).__module__ + type(self), _local_dir_cls=_local_dir_cls ) self.__class__ = future_cls - setattr( - sys.modules[type(self).__module__], - self.__class__.__name__, - self.__class__, - ) # reset the attribute of the grid2op.Backend.Backend class # that can be messed up with depending on the initialization of the backend @@ -2113,13 +2104,21 @@ def assert_grid_correct(self) -> None: orig_type._clear_grid_dependant_class_attributes() my_cls = type(self) - my_cls.my_bk_act_class = _BackendAction.init_grid(my_cls) - my_cls._complete_action_class = CompleteAction.init_grid(my_cls) - my_cls._complete_action_class._add_shunt_data() - my_cls._complete_action_class._update_value_set() - my_cls.assert_grid_correct_cls() + my_cls._add_internal_classes(_local_dir_cls) self._remove_my_attr_cls() + @classmethod + def _add_internal_classes(cls, _local_dir_cls): + # lazy loading + from grid2op.Action import CompleteAction + from grid2op.Action._backendAction import _BackendAction + + cls.my_bk_act_class = _BackendAction.init_grid(cls, _local_dir_cls=_local_dir_cls) + cls._complete_action_class = CompleteAction.init_grid(cls, _local_dir_cls=_local_dir_cls) + cls._complete_action_class._add_shunt_data() + cls._complete_action_class._update_value_set() + cls.assert_grid_correct_cls() + def _remove_my_attr_cls(self): """ INTERNAL @@ -2160,22 
+2159,22 @@ def assert_grid_correct_after_powerflow(self) -> None: if tmp.shape[0] != self.n_line: raise IncorrectNumberOfLines('returned by "backend.get_line_status()"') if (~np.isfinite(tmp)).any(): - raise EnvironmentError(type(self).ERR_INIT_POWERFLOW) + raise EnvError(type(self).ERR_INIT_POWERFLOW) tmp = self.get_line_flow() if tmp.shape[0] != self.n_line: raise IncorrectNumberOfLines('returned by "backend.get_line_flow()"') if (~np.isfinite(tmp)).any(): - raise EnvironmentError(type(self).ERR_INIT_POWERFLOW) + raise EnvError(type(self).ERR_INIT_POWERFLOW) tmp = self.get_thermal_limit() if tmp.shape[0] != self.n_line: raise IncorrectNumberOfLines('returned by "backend.get_thermal_limit()"') if (~np.isfinite(tmp)).any(): - raise EnvironmentError(type(self).ERR_INIT_POWERFLOW) + raise EnvError(type(self).ERR_INIT_POWERFLOW) tmp = self.get_line_overflow() if tmp.shape[0] != self.n_line: raise IncorrectNumberOfLines('returned by "backend.get_line_overflow()"') if (~np.isfinite(tmp)).any(): - raise EnvironmentError(type(self).ERR_INIT_POWERFLOW) + raise EnvError(type(self).ERR_INIT_POWERFLOW) tmp = self.generators_info() if len(tmp) != 3: diff --git a/grid2op/Backend/pandaPowerBackend.py b/grid2op/Backend/pandaPowerBackend.py index 95876334c..299043b65 100644 --- a/grid2op/Backend/pandaPowerBackend.py +++ b/grid2op/Backend/pandaPowerBackend.py @@ -17,6 +17,8 @@ import pandapower as pp import scipy +# check that pandapower does not introduce some +from packaging import version import grid2op from grid2op.dtypes import dt_int, dt_float, dt_bool @@ -24,6 +26,8 @@ from grid2op.Exceptions import BackendError from grid2op.Backend.backend import Backend +MIN_LS_VERSION_VM_PU = version.parse("0.6.0") + try: import numba NUMBA_ = True @@ -223,6 +227,7 @@ def __init__( self._in_service_line_col_id = None self._in_service_trafo_col_id = None self._in_service_storage_cold_id = None + self.div_exception = None def _check_for_non_modeled_elements(self): """This function check 
for elements in the pandapower grid that will have no impact on grid2op. @@ -353,30 +358,15 @@ def load_grid(self, i_ref = None self._iref_slack = None self._id_bus_added = None - with warnings.catch_warnings(): - warnings.filterwarnings("ignore") - try: - pp.runpp( - self._grid, - numba=self.with_numba, - lightsim2grid=self._lightsim2grid, - distributed_slack=self._dist_slack, - max_iteration=self._max_iter, - ) - except pp.powerflow.LoadflowNotConverged: - pp.rundcpp( - self._grid, - numba=self.with_numba, - lightsim2grid=self._lightsim2grid, - distributed_slack=self._dist_slack, - max_iteration=self._max_iter, - ) + + self._aux_run_pf_init() # run an intiail powerflow, just in case + new_pp_version = False if not "slack_weight" in self._grid.gen: self._grid.gen["slack_weight"] = 1.0 else: new_pp_version = True - + if np.all(~self._grid.gen["slack"]): # there are not defined slack bus on the data, i need to hack it up a little bit pd2ppc = self._grid._pd2ppc_lookups["bus"] # pd2ppc[pd_id] = ppc_id @@ -438,24 +428,7 @@ def load_grid(self, else: self.slack_id = (self._grid.gen["slack"].values).nonzero()[0] - with warnings.catch_warnings(): - warnings.filterwarnings("ignore") - try: - pp.runpp( - self._grid, - numba=self.with_numba, - lightsim2grid=self._lightsim2grid, - distributed_slack=self._dist_slack, - max_iteration=self._max_iter, - ) - except pp.powerflow.LoadflowNotConverged: - pp.rundcpp( - self._grid, - numba=self.with_numba, - lightsim2grid=self._lightsim2grid, - distributed_slack=self._dist_slack, - max_iteration=self._max_iter, - ) + self._aux_run_pf_init() # run another powerflow with the added generator self.__nb_bus_before = self._grid.bus.shape[0] self.__nb_powerline = self._grid.line.shape[0] @@ -567,12 +540,42 @@ def load_grid(self, for ind, el in add_topo.iterrows(): pp.create_bus(self._grid, index=ind, **el) self._init_private_attrs() + self._aux_run_pf_init() # run yet another powerflow with the added buses # do this at the end 
self._in_service_line_col_id = int((self._grid.line.columns == "in_service").nonzero()[0][0]) self._in_service_trafo_col_id = int((self._grid.trafo.columns == "in_service").nonzero()[0][0]) self._in_service_storage_cold_id = int((self._grid.storage.columns == "in_service").nonzero()[0][0]) - + self.comp_time = 0. + + # hack for backward compat with oldest lightsim2grid version + try: + import lightsim2grid + if version.parse(lightsim2grid.__version__) < MIN_LS_VERSION_VM_PU: + warnings.warn("You are using a really old version of lightsim2grid. Consider upgrading.") + if "_options" in self._grid and "init_vm_pu" in self._grid["_options"]: + try: + float(self._grid["_options"]["init_vm_pu"]) + except ValueError as exc_: + # we delete it because lightsim2grid uses it + # to init its internal "GridModel" and did not check that + # this is a float until MIN_LS_VERSION_VM_PU + del self._grid["_options"]["init_vm_pu"] + except ImportError: + # lightsim2grid is not installed, so no risk to contaminate it + pass + + def _aux_run_pf_init(self): + """run a powerflow when the file is being loaded. This is called three times for each call to "load_grid" """ + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + try: + self._aux_runpf_pp(False) + if not self._grid.converged: + raise pp.powerflow.LoadflowNotConverged + except pp.powerflow.LoadflowNotConverged: + self._aux_runpf_pp(True) + def _init_private_attrs(self) -> None: # number of elements per substation self.sub_info = np.zeros(self.n_sub, dtype=dt_int) @@ -691,23 +694,23 @@ def _init_private_attrs(self) -> None: "prod_v" ] = self._load_grid_gen_vm_pu # lambda grid: grid.gen["vm_pu"] - self.load_pu_to_kv = self._grid.bus["vn_kv"][self.load_to_subid].values.astype( + self.load_pu_to_kv = 1. * self._grid.bus["vn_kv"][self.load_to_subid].values.astype( dt_float ) - self.prod_pu_to_kv = self._grid.bus["vn_kv"][self.gen_to_subid].values.astype( + self.prod_pu_to_kv = 1. 
* self._grid.bus["vn_kv"][self.gen_to_subid].values.astype( dt_float ) - self.lines_or_pu_to_kv = self._grid.bus["vn_kv"][ + self.lines_or_pu_to_kv = 1. * self._grid.bus["vn_kv"][ self.line_or_to_subid ].values.astype(dt_float) - self.lines_ex_pu_to_kv = self._grid.bus["vn_kv"][ + self.lines_ex_pu_to_kv = 1. * self._grid.bus["vn_kv"][ self.line_ex_to_subid ].values.astype(dt_float) - self.storage_pu_to_kv = self._grid.bus["vn_kv"][ + self.storage_pu_to_kv = 1. * self._grid.bus["vn_kv"][ self.storage_to_subid ].values.astype(dt_float) - self.thermal_limit_a = 1000 * np.concatenate( + self.thermal_limit_a = 1000. * np.concatenate( ( self._grid.line["max_i_ka"].values, self._grid.trafo["sn_mva"].values @@ -827,7 +830,7 @@ def apply_action(self, backendAction: Union["grid2op.Action._backendAction._Back """ if backendAction is None: return - + cls = type(self) ( @@ -1012,13 +1015,14 @@ def _aux_runpf_pp(self, is_dc: bool): ) warnings.filterwarnings("ignore", category=RuntimeWarning) warnings.filterwarnings("ignore", category=DeprecationWarning) - nb_bus = self.get_nb_active_bus() - if self._nb_bus_before is None: - self._pf_init = "dc" - elif nb_bus == self._nb_bus_before: - self._pf_init = "results" - else: - self._pf_init = "auto" + self._pf_init = "dc" + # nb_bus = self.get_nb_active_bus() + # if self._nb_bus_before is None: + # self._pf_init = "dc" + # elif nb_bus == self._nb_bus_before: + # self._pf_init = "results" + # else: + # self._pf_init = "auto" if (~self._grid.load["in_service"]).any(): # TODO see if there is a better way here -> do not handle this here, but rather in Backend._next_grid_state @@ -1081,12 +1085,13 @@ def runpf(self, is_dc : bool=False) -> Tuple[bool, Union[Exception, None]]: """ try: self._aux_runpf_pp(is_dc) - - cls = type(self) + cls = type(self) # if a connected bus has a no voltage, it's a divergence (grid was not connected) if self._grid.res_bus.loc[self._grid.bus["in_service"]]["va_degree"].isnull().any(): - raise 
pp.powerflow.LoadflowNotConverged("Isolated bus") - + buses_ko = self._grid.res_bus.loc[self._grid.bus["in_service"]]["va_degree"].isnull() + buses_ko = buses_ko.values.nonzero()[0] + raise pp.powerflow.LoadflowNotConverged(f"Isolated bus, check buses {buses_ko} with `env.backend._grid.res_bus.iloc[{buses_ko}, :]`") + ( self.prod_p[:], self.prod_q[:], @@ -1104,7 +1109,7 @@ def runpf(self, is_dc : bool=False) -> Tuple[bool, Union[Exception, None]]: if not np.isfinite(self.load_v).all(): # TODO see if there is a better way here # some loads are disconnected: it's a game over case! - raise pp.powerflow.LoadflowNotConverged("Isolated load") + raise pp.powerflow.LoadflowNotConverged(f"Isolated load: check loads {np.isfinite(self.load_v).nonzero()[0]}") else: # fix voltages magnitude that are always "nan" for dc case # self._grid.res_bus["vm_pu"] is always nan when computed in DC @@ -1130,7 +1135,7 @@ def runpf(self, is_dc : bool=False) -> Tuple[bool, Union[Exception, None]]: self.p_or[:] = self._aux_get_line_info("p_from_mw", "p_hv_mw") self.q_or[:] = self._aux_get_line_info("q_from_mvar", "q_hv_mvar") self.v_or[:] = self._aux_get_line_info("vm_from_pu", "vm_hv_pu") - self.a_or[:] = self._aux_get_line_info("i_from_ka", "i_hv_ka") * 1000 + self.a_or[:] = self._aux_get_line_info("i_from_ka", "i_hv_ka") * 1000. self.theta_or[:] = self._aux_get_line_info( "va_from_degree", "va_hv_degree" ) @@ -1140,7 +1145,7 @@ def runpf(self, is_dc : bool=False) -> Tuple[bool, Union[Exception, None]]: self.p_ex[:] = self._aux_get_line_info("p_to_mw", "p_lv_mw") self.q_ex[:] = self._aux_get_line_info("q_to_mvar", "q_lv_mvar") self.v_ex[:] = self._aux_get_line_info("vm_to_pu", "vm_lv_pu") - self.a_ex[:] = self._aux_get_line_info("i_to_ka", "i_lv_ka") * 1000 + self.a_ex[:] = self._aux_get_line_info("i_to_ka", "i_lv_ka") * 1000. 
self.theta_ex[:] = self._aux_get_line_info( "va_to_degree", "va_lv_degree" ) @@ -1158,7 +1163,9 @@ def runpf(self, is_dc : bool=False) -> Tuple[bool, Union[Exception, None]]: self.theta_ex[~np.isfinite(self.theta_ex)] = 0.0 self._nb_bus_before = None - self._grid._ppc["gen"][self._iref_slack, 1] = 0.0 + if self._iref_slack is not None: + # a gen has been added to represent the slack, modeled as an "ext_grid" + self._grid._ppc["gen"][self._iref_slack, 1] = 0.0 # handle storage units # note that we have to look ourselves for disconnected storage @@ -1179,13 +1186,17 @@ def runpf(self, is_dc : bool=False) -> Tuple[bool, Union[Exception, None]]: self._grid.storage["in_service"].values[deact_storage] = False self._topo_vect[:] = self._get_topo_vect() - return self._grid.converged, None + if not self._grid.converged: + raise pp.powerflow.LoadflowNotConverged("Divergence without specific reason (self._grid.converged is False)") + self.div_exception = None + return True, None except pp.powerflow.LoadflowNotConverged as exc_: # of the powerflow has not converged, results are Nan + self.div_exception = exc_ self._reset_all_nan() msg = exc_.__str__() - return False, BackendError(f'powerflow diverged with error :"{msg}"') + return False, BackendError(f'powerflow diverged with error :"{msg}", you can check `env.backend.div_exception` for more information') def _reset_all_nan(self) -> None: self.p_or[:] = np.NaN @@ -1221,7 +1232,6 @@ def copy(self) -> "PandaPowerBackend": This should return a deep copy of the Backend itself and not just the `self._grid` """ - # res = copy.deepcopy(self) # this was really slow... res = type(self)(**self._my_kwargs) # copy from base class (backend) @@ -1298,11 +1308,10 @@ def copy(self) -> "PandaPowerBackend": with warnings.catch_warnings(): warnings.simplefilter("ignore", FutureWarning) res.__pp_backend_initial_grid = copy.deepcopy(self.__pp_backend_initial_grid) - - res.tol = ( - self.tol - ) # this is NOT the pandapower tolerance !!!! 
this is used to check if a storage unit + + # this is NOT the pandapower tolerance !!!! this is used to check if a storage unit # produce / absorbs anything + res.tol = self.tol # TODO storage doc (in grid2op rst) of the backend res.can_output_theta = self.can_output_theta # I support the voltage angle @@ -1316,6 +1325,7 @@ def copy(self) -> "PandaPowerBackend": res._in_service_trafo_col_id = self._in_service_trafo_col_id res._missing_two_busbars_support_info = self._missing_two_busbars_support_info + res.div_exception = self.div_exception return res def close(self) -> None: diff --git a/grid2op/Chronics/GSFFWFWM.py b/grid2op/Chronics/GSFFWFWM.py index 28a0bf6fb..8ab2c1f22 100644 --- a/grid2op/Chronics/GSFFWFWM.py +++ b/grid2op/Chronics/GSFFWFWM.py @@ -108,6 +108,14 @@ def initialize( self.max_daily_number_per_month_maintenance = dict_[ "max_daily_number_per_month_maintenance" ] + + if "maintenance_day_of_week" in dict_: + self.maintenance_day_of_week = [int(el) for el in dict_[ + "maintenance_day_of_week" + ]] + else: + self.maintenance_day_of_week = np.arange(5) + super().initialize( order_backend_loads, order_backend_prods, @@ -133,7 +141,6 @@ def _sample_maintenance(self): ######## # new method to introduce generated maintenance self.maintenance = self._generate_maintenance() # - ########## # same as before in GridStateFromFileWithForecasts GridStateFromFileWithForecastsWithMaintenance._fix_maintenance_format(self) @@ -171,7 +178,12 @@ def _generate_matenance_static(name_line, daily_proba_per_month_maintenance, max_daily_number_per_month_maintenance, space_prng, + maintenance_day_of_week=None ): + if maintenance_day_of_week is None: + # new in grid2op 1.10.3 + maintenance_day_of_week = np.arange(5) + # define maintenance dataframe with size (nbtimesteps,nlines) columnsNames = name_line nbTimesteps = n_ @@ -203,8 +215,6 @@ def _generate_matenance_static(name_line, datelist = datelist[:-1] n_lines_maintenance = len(line_to_maintenance) - - _24_h = 
timedelta(seconds=86400) nb_rows = int(86400 / time_interval.total_seconds()) selected_rows_beg = int( maintenance_starting_hour * 3600 / time_interval.total_seconds() @@ -220,7 +230,7 @@ def _generate_matenance_static(name_line, maxDailyMaintenance = -1 for nb_day_since_beg, this_day in enumerate(datelist): dayOfWeek = this_day.weekday() - if dayOfWeek < 5: # only maintenance starting on working days + if dayOfWeek in maintenance_day_of_week: month = this_day.month maintenance_me = np.zeros((nb_rows, nb_line_maint)) @@ -279,5 +289,9 @@ def _generate_maintenance(self): self.maintenance_ending_hour, self.daily_proba_per_month_maintenance, self.max_daily_number_per_month_maintenance, - self.space_prng + self.space_prng, + self.maintenance_day_of_week ) + + def regenerate_with_new_seed(self): + self._sample_maintenance() diff --git a/grid2op/Chronics/chronicsHandler.py b/grid2op/Chronics/chronicsHandler.py index 44ad9256f..9f04c8f92 100644 --- a/grid2op/Chronics/chronicsHandler.py +++ b/grid2op/Chronics/chronicsHandler.py @@ -160,13 +160,21 @@ def get_name(self): """ return str(os.path.split(self.get_id())[-1]) - def set_max_iter(self, max_iter: int): + def _set_max_iter(self, max_iter: int): """ This function is used to set the maximum number of iterations possible before the chronics ends. You can reset this by setting it to `-1`. + .. danger:: + As for grid2op 1.10.3, due to the fix of a bug when + max_iter and fast_forward were used at the same time + you should not use this function anymore. + + Please use `env.set_max_iter()` instead of + `env.chronics_hander.set_max_iter()` + Parameters ---------- max_iter: ``int`` @@ -175,9 +183,9 @@ def set_max_iter(self, max_iter: int): """ - if not isinstance(max_iter, int): + if not isinstance(max_iter, (int, dt_int, np.int64)): raise Grid2OpException( - "The maximum number of iterations possible for this chronics, before it ends." 
+ "The maximum number of iterations possible for this time series, before it ends should be an int" ) if max_iter == 0: raise Grid2OpException( @@ -227,5 +235,7 @@ def cleanup_action_space(self): """INTERNAL, used to forget the "old" action_space when the chronics_handler is copied for example. """ + if self._real_data is None: + return self._real_data.cleanup_action_space() \ No newline at end of file diff --git a/grid2op/Chronics/fromChronix2grid.py b/grid2op/Chronics/fromChronix2grid.py index 2831f8d9d..9c6843404 100644 --- a/grid2op/Chronics/fromChronix2grid.py +++ b/grid2op/Chronics/fromChronix2grid.py @@ -309,4 +309,12 @@ def next_chronics(self): GridStateFromFileWithForecastsWithMaintenance._fix_maintenance_format(self) self.check_validity(backend=None) + + def regenerate_with_new_seed(self): + raise ChronicsError("You should not 'cache' the data coming from the " + "`FromChronix2grid`, which is probably why you ended " + "up calling this function. If you want to generate data " + "'on the fly' please do not use the `MultiFolder` or " + "`MultiFolderWithCache` `chronics_class` when making your " + "environment.") \ No newline at end of file diff --git a/grid2op/Chronics/fromNPY.py b/grid2op/Chronics/fromNPY.py index 4d34f80ac..475f5aa7e 100644 --- a/grid2op/Chronics/fromNPY.py +++ b/grid2op/Chronics/fromNPY.py @@ -222,6 +222,7 @@ def __init__( ) self._init_state = init_state + self._max_iter = min(self._i_end - self._i_start, load_p.shape[0]) def initialize( self, @@ -252,6 +253,7 @@ def initialize( self.curr_iter = 0 self.current_index = self._i_start - 1 + self._max_iter = self._i_end - self._i_start def _get_long_hash(self, hash_: hashlib.blake2b = None): # get the "long hash" from blake2b @@ -420,6 +422,7 @@ def next_chronics(self): # update the forecast self._forecasts.next_chronics() self.check_validity(backend=None) + self._max_iter = self._i_end - self._i_start def done(self): """ @@ -648,6 +651,7 @@ def change_i_start(self, new_i_start: Union[int, 
None]): self.__new_istart = int(new_i_start) else: self.__new_istart = None + def change_i_end(self, new_i_end: Union[int, None]): """ diff --git a/grid2op/Chronics/gridValue.py b/grid2op/Chronics/gridValue.py index e49c6bb57..44cc2cb5c 100644 --- a/grid2op/Chronics/gridValue.py +++ b/grid2op/Chronics/gridValue.py @@ -856,3 +856,21 @@ def cleanup_action_space(self): """ self.__action_space = None # NB the action space is not closed as it is NOT own by this class + + def regenerate_with_new_seed(self): + """ + INTERNAL this function is called by some classes (*eg* :class:`MultifolderWithCache`) + when a new seed has been set. + + For example, if you use some 'chronics' that generate part of them randomly (*eg* + :class:`GridStateFromFileWithForecastsWithMaintenance`) they need to be aware of this + so that a reset actually update the seeds. + + This is closely related to issue https://github.com/rte-france/Grid2Op/issues/616 + + .. danger:: + This function should be called only once (not 0, not twice) after a "seed" function has been set. + Otherwise results might not be fully reproducible. + + """ + pass diff --git a/grid2op/Chronics/handlers/baseHandler.py b/grid2op/Chronics/handlers/baseHandler.py index 0cb51d9a9..329e06f79 100644 --- a/grid2op/Chronics/handlers/baseHandler.py +++ b/grid2op/Chronics/handlers/baseHandler.py @@ -73,7 +73,7 @@ def __init__(self, array_name, max_iter=-1, h_forecast=(5, )): self.path : Optional[os.PathLike] = None self.max_episode_duration : Optional[int] = None - def set_max_iter(self, max_iter: Optional[int]) -> None: + def _set_max_iter(self, max_iter: Optional[int]) -> None: """ INTERNAL @@ -494,3 +494,14 @@ def get_init_dict_action(self) -> Union[dict, None]: action space. 
""" raise NotImplementedError() + + def regenerate_with_new_seed(self): + """This function is called in case of data being "cached" (for example using the + :class:`grid2op.Chronics.MultifolderWithCache`) + + In this case, the data in cache needs to be updated if the seed has changed since + the time they have been added to it. + + If your handler has some random part, we recommend you to implement this function. + Otherwise feel free to ignore it""" + pass diff --git a/grid2op/Chronics/handlers/csvForecastHandler.py b/grid2op/Chronics/handlers/csvForecastHandler.py index 046ac8704..cf08a0eaa 100644 --- a/grid2op/Chronics/handlers/csvForecastHandler.py +++ b/grid2op/Chronics/handlers/csvForecastHandler.py @@ -93,8 +93,8 @@ def load_next(self, dict_): def set_chunk_size(self, chunk_size): super().set_chunk_size(self._nb_row_per_step * int(chunk_size)) - def set_max_iter(self, max_iter): - super().set_max_iter(self._nb_row_per_step * int(max_iter)) + def _set_max_iter(self, max_iter): + super()._set_max_iter(self._nb_row_per_step * int(max_iter)) def set_h_forecast(self, h_forecast): super().set_h_forecast(h_forecast) diff --git a/grid2op/Chronics/handlers/jsonMaintenanceHandler.py b/grid2op/Chronics/handlers/jsonMaintenanceHandler.py index 27d2eef7f..3b891ab21 100644 --- a/grid2op/Chronics/handlers/jsonMaintenanceHandler.py +++ b/grid2op/Chronics/handlers/jsonMaintenanceHandler.py @@ -63,7 +63,8 @@ def __init__(self, self.n_line = None # used in one of the GridStateFromFileWithForecastsWithMaintenance functions self._duration_episode_default = _duration_episode_default self.current_index = 0 - + self._order_backend_arrays = None + def get_maintenance_time_1d(self, maintenance): return GridValue.get_maintenance_time_1d(maintenance) @@ -82,7 +83,8 @@ def _create_maintenance_arrays(self, current_datetime): self.dict_meta_data["maintenance_ending_hour"], self.dict_meta_data["daily_proba_per_month_maintenance"], 
self.dict_meta_data["max_daily_number_per_month_maintenance"], - self.space_prng + self.space_prng, + self.dict_meta_data["maintenance_day_of_week"] if "maintenance_day_of_week" in self.dict_meta_data else None ) GridStateFromFileWithForecastsWithMaintenance._fix_maintenance_format(self) @@ -128,4 +130,8 @@ def _clear(self): def done(self): # maintenance can be generated on the fly so they are never "done" - return False \ No newline at end of file + return False + + def regenerate_with_new_seed(self): + if self.dict_meta_data is not None: + self._create_maintenance_arrays(self.init_datetime) diff --git a/grid2op/Chronics/handlers/noisyForecastHandler.py b/grid2op/Chronics/handlers/noisyForecastHandler.py index e047c9271..8fb4cc763 100644 --- a/grid2op/Chronics/handlers/noisyForecastHandler.py +++ b/grid2op/Chronics/handlers/noisyForecastHandler.py @@ -212,3 +212,7 @@ def forecast(self, res *= self._env_loss_ratio(inj_dict_env) # TODO ramps, pmin, pmax ! return res.astype(dt_float) if res is not None else None + + def regenerate_with_new_seed(self): + # there is nothing to do for this handler as things are generated "on the fly" + pass \ No newline at end of file diff --git a/grid2op/Chronics/multiFolder.py b/grid2op/Chronics/multiFolder.py index 47ed2fa53..e8b8c9b48 100644 --- a/grid2op/Chronics/multiFolder.py +++ b/grid2op/Chronics/multiFolder.py @@ -394,6 +394,17 @@ def reset(self): self._order = np.array(self._order) return self.subpaths[self._order] + def _get_nex_data(self, this_path): + res = self.gridvalueClass( + time_interval=self.time_interval, + sep=self.sep, + path=this_path, + max_iter=self.max_iter, + chunk_size=self.chunk_size, + **self._kwargs + ) + return res + def initialize( self, order_backend_loads, @@ -419,14 +430,7 @@ def initialize( id_scenario = self._order[self._prev_cache_id] this_path = self.subpaths[id_scenario] - self.data = self.gridvalueClass( - time_interval=self.time_interval, - sep=self.sep, - path=this_path, - 
max_iter=self.max_iter, - chunk_size=self.chunk_size, - **self._kwargs - ) + self.data = self._get_nex_data(this_path) if self.seed is not None: max_int = np.iinfo(dt_int).max seed_chronics = self.space_prng.randint(max_int) @@ -441,6 +445,7 @@ def initialize( ) if self.action_space is not None: self.data.action_space = self.action_space + self._max_iter = self.data.max_iter def done(self): """ @@ -787,4 +792,6 @@ def get_init_action(self, names_chronics_to_backend: Optional[Dict[Literal["load def cleanup_action_space(self): super().cleanup_action_space() + if self.data is None: + return self.data.cleanup_action_space() diff --git a/grid2op/Chronics/multifolderWithCache.py b/grid2op/Chronics/multifolderWithCache.py index e5a5755bd..436842841 100644 --- a/grid2op/Chronics/multifolderWithCache.py +++ b/grid2op/Chronics/multifolderWithCache.py @@ -7,10 +7,12 @@ # This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems. import numpy as np from datetime import timedelta, datetime +import warnings from grid2op.dtypes import dt_int from grid2op.Chronics.multiFolder import Multifolder from grid2op.Chronics.gridStateFromFile import GridStateFromFile +from grid2op.Chronics.time_series_from_handlers import FromHandlers from grid2op.Exceptions import ChronicsError @@ -70,7 +72,7 @@ class MultifolderWithCache(Multifolder): env = make(...,chronics_class=MultifolderWithCache) # set the chronics to limit to one week of data (lower memory footprint) - env.chronics_handler.set_max_iter(7*288) + env.set_max_iter(7*288) # assign a filter, use only chronics that have "december" in their name env.chronics_handler.real_data.set_filter(lambda x: re.match(".*december.*", x) is not None) # create the cache @@ -140,12 +142,18 @@ def __init__( ) self._cached_data = None self.cache_size = 0 - if not issubclass(self.gridvalueClass, GridStateFromFile): + if not (issubclass(self.gridvalueClass, GridStateFromFile) or + 
issubclass(self.gridvalueClass, FromHandlers)): raise RuntimeError( 'MultifolderWithCache does not work when "gridvalueClass" does not inherit from ' '"GridStateFromFile".' ) + if issubclass(self.gridvalueClass, FromHandlers): + warnings.warn("You use caching with handler data. This is possible but " + "might be a bit risky especially if your handlers are " + "heavily 'random' and you want fully reproducible results.") self.__i = 0 + self._cached_seeds = None def _default_filter(self, x): """ @@ -162,6 +170,11 @@ def reset(self): Rebuilt the cache as if it were built from scratch. This call might take a while to process. + This means that current data in cache will be discarded and that new data will + most likely be read from the hard drive. + + This might take a while. + .. danger:: You NEED to call this function (with `env.chronics_handler.reset()`) if you use the `MultiFolderWithCache` class in your experiments. @@ -180,16 +193,10 @@ def reset(self): for i in self._order: # everything in "_order" need to be put in cache path = self.subpaths[i] - data = self.gridvalueClass( - time_interval=self.time_interval, - sep=self.sep, - path=path, - max_iter=self.max_iter, - chunk_size=None, - ) - if self.seed_used is not None: - seed_chronics = self.space_prng.randint(max_int) - data.seed(seed_chronics) + data = self._get_nex_data(path) + + if self._cached_seeds is not None: + data.seed(self._cached_seeds[i]) data.initialize( self._order_backend_loads, @@ -198,6 +205,10 @@ def reset(self): self._order_backend_subs, self._names_chronics_to_backend, ) + + if self._cached_seeds is not None: + data.regenerate_with_new_seed() + self._cached_data[i] = data self.cache_size += 1 if self.action_space is not None: @@ -233,12 +244,16 @@ def initialize( self.n_load = len(order_backend_loads) self.n_line = len(order_backend_lines) if self._cached_data is None: - # initialize the cache + # initialize the cache of this MultiFolder self.reset() id_scenario = 
self._order[self._prev_cache_id] self.data = self._cached_data[id_scenario] self.data.next_chronics() + if self.seed_used is not None and self.data.seed_used != self._cached_seeds[id_scenario]: + self.data.seed(self._cached_seeds[id_scenario]) + self.data.regenerate_with_new_seed() + self._max_iter = self.data.max_iter @property def max_iter(self): @@ -260,6 +275,15 @@ def seed(self, seed : int): (which has an impact for example on :func:`MultiFolder.sample_next_chronics`) and each data present in the cache. + .. warning:: + Before grid2op version 1.10.3 this function did not fully ensured + reproducible experiments (the cache was not update with the new seed) + + For grid2op 1.10.3 and after, this function might trigger some modification + in the cached data (calling :func:`GridValue.seed` and then + :func:`GridValue.regenerate_with_new_seed`). It might take a while if the cache + is large. + Parameters ---------- seed : int @@ -267,12 +291,15 @@ def seed(self, seed : int): """ res = super().seed(seed) max_int = np.iinfo(dt_int).max + self._cached_seeds = np.empty(shape=self._order.shape, dtype=dt_int) for i in self._order: data = self._cached_data[i] + seed_ts = self.space_prng.randint(max_int) + self._cached_seeds[i] = seed_ts if data is None: continue - seed_ts = self.space_prng.randint(max_int) data.seed(seed_ts) + data.regenerate_with_new_seed() return res def load_next(self): @@ -284,9 +311,66 @@ def load_next(self): return super().load_next() def set_filter(self, filter_fun): + """ + Assign a filtering function to remove some chronics from the next time a call to "reset_cache" is called. + + **NB** filter_fun is applied to all element of :attr:`Multifolder.subpaths`. If ``True`` then it will + be put in cache, if ``False`` this data will NOT be put in the cache. + + **NB** this has no effect until :attr:`Multifolder.reset` is called. + + + .. danger:: + Calling this function cancels the previous seed used. 
If you use `env.seed` + or `env.chronics_handler.seed` before then you need to + call it again after otherwise it has no effect. + + Parameters + ---------- + filter_fun : _type_ + _description_ + + Examples + -------- + Let's assume in your chronics, the folder names are "Scenario_august_dummy", and + "Scenario_february_dummy". For the sake of the example, we want the environment to loop + only through the month of february, because why not. Then we can do the following: + + .. code-block:: python + + import re + import grid2op + env = grid2op.make("l2rpn_neurips_2020_track1", test=True) # don't add "test=True" if + # you don't want to perform a test. + + # check at which month will belong each observation + for i in range(10): + obs = env.reset() + print(obs.month) + # it always alternatively prints "8" (if chronics if from august) or + # "2" if chronics is from february) + + # to see where the chronics are located + print(env.chronics_handler.subpaths) + + # keep only the month of february + env.chronics_handler.set_filter(lambda path: re.match(".*february.*", path) is not None) + env.chronics_handler.reset() # if you don't do that it will not have any effect + + for i in range(10): + obs = env.reset() + print(obs.month) + # it always prints "2" (representing february) + + Returns + ------- + _type_ + _description_ + """ self.__nb_reset_called = 0 self.__nb_step_called = 0 self.__nb_init_called = 0 + self._cached_seeds = None return super().set_filter(filter_fun) def get_kwargs(self, dict_): diff --git a/grid2op/Chronics/time_series_from_handlers.py b/grid2op/Chronics/time_series_from_handlers.py index d3a3af4aa..646cf3deb 100644 --- a/grid2op/Chronics/time_series_from_handlers.py +++ b/grid2op/Chronics/time_series_from_handlers.py @@ -204,7 +204,7 @@ def __init__( self.set_chunk_size(chunk_size) if max_iter != -1: - self.set_max_iter(max_iter) + self._set_max_iter(max_iter) self.init_datetime() self.current_inj = None @@ -389,10 +389,10 @@ def 
set_chunk_size(self, new_chunk_size): for el in self._active_handlers: el.set_chunk_size(new_chunk_size) - def set_max_iter(self, max_iter): + def _set_max_iter(self, max_iter): self.max_iter = int(max_iter) for el in self._active_handlers: - el.set_max_iter(max_iter) + el._set_max_iter(max_iter) def init_datetime(self): for handl in self._active_handlers: @@ -560,3 +560,7 @@ def get_init_action(self, names_chronics_to_backend: Optional[Dict[Literal["load raise Grid2OpException(f"The action to set the grid to its original configuration " f"is ambiguous. Please check {self.init_state_handler.path}") from reason return act + + def regenerate_with_new_seed(self): + for handl in self._active_handlers: + handl.regenerate_with_new_seed() diff --git a/grid2op/Converter/BackendConverter.py b/grid2op/Converter/BackendConverter.py index dfd0ced63..4c023c85e 100644 --- a/grid2op/Converter/BackendConverter.py +++ b/grid2op/Converter/BackendConverter.py @@ -429,7 +429,7 @@ def _auto_fill_vect_topo_aux(self, n_elem, source_pos, target_pos, sr2tg): self._topo_tg2sr[source_pos[sr2tg]] = target_pos self._topo_sr2tg[target_pos] = source_pos[sr2tg] - def assert_grid_correct(self): + def assert_grid_correct(self, _local_dir_cls=None) -> None: # this is done before a call to this function, by the environment tg_cls = type(self.target_backend) sr_cls = type(self.source_backend) @@ -480,13 +480,13 @@ def assert_grid_correct(self): ) # init the target backend (the one that does the computation and that is initialized) - self.target_backend.assert_grid_correct() + self.target_backend.assert_grid_correct(_local_dir_cls=_local_dir_cls) # initialize the other one, because, well the grid should be seen from both backend self.source_backend._init_class_attr(obj=self) - self.source_backend.assert_grid_correct() + self.source_backend.assert_grid_correct(_local_dir_cls=_local_dir_cls) # and this should be called after all the rest - super().assert_grid_correct() + 
super().assert_grid_correct(_local_dir_cls=_local_dir_cls) # everything went well, so i can properly terminate my initialization self._init_myself() diff --git a/grid2op/Converter/IdToAct.py b/grid2op/Converter/IdToAct.py index be96e992d..063b1f59d 100644 --- a/grid2op/Converter/IdToAct.py +++ b/grid2op/Converter/IdToAct.py @@ -70,6 +70,7 @@ class IdToAct(Converter): def __init__(self, action_space): Converter.__init__(self, action_space) self.__class__ = IdToAct.init_grid(action_space) + self.init_action_space = action_space self.all_actions = [] # add the do nothing topology self.all_actions.append(super().__call__()) diff --git a/grid2op/Environment/_forecast_env.py b/grid2op/Environment/_forecast_env.py index ad08fc7df..7378df7c7 100644 --- a/grid2op/Environment/_forecast_env.py +++ b/grid2op/Environment/_forecast_env.py @@ -21,6 +21,7 @@ def __init__(self, *args, **kwargs): if "_update_obs_after_reward" not in kwargs: kwargs["_update_obs_after_reward"] = False super().__init__(*args, **kwargs) + self._do_not_erase_local_dir_cls = True def step(self, action: BaseAction) -> Tuple[BaseObservation, float, bool, dict]: self._highres_sim_counter += 1 diff --git a/grid2op/Environment/_obsEnv.py b/grid2op/Environment/_obsEnv.py index 0c713f707..172235eb7 100644 --- a/grid2op/Environment/_obsEnv.py +++ b/grid2op/Environment/_obsEnv.py @@ -75,6 +75,8 @@ def __init__( highres_sim_counter=None, _complete_action_cls=None, _ptr_orig_obs_space=None, + _local_dir_cls=None, # only set at the first call to `make(...)` after should be false + _read_from_local_dir=None, ): BaseEnv.__init__( self, @@ -92,7 +94,10 @@ def __init__( logger=logger, highres_sim_counter=highres_sim_counter, update_obs_after_reward=False, + _local_dir_cls=_local_dir_cls, + _read_from_local_dir=_read_from_local_dir ) + self._do_not_erase_local_dir_cls = True self.__unusable = False # unsuable if backend cannot be copied self._reward_helper = reward_helper @@ -101,12 +106,13 @@ def __init__( # initialize 
the observation space self._obsClass = None - + + cls = type(self) # line status (inherited from BaseEnv) - self._line_status = np.full(self.n_line, dtype=dt_bool, fill_value=True) + self._line_status = np.full(cls.n_line, dtype=dt_bool, fill_value=True) # line status (for this usage) self._line_status_me = np.ones( - shape=self.n_line, dtype=dt_int + shape=cls.n_line, dtype=dt_int ) # this is "line status" but encode in +1 / -1 if self._thermal_limit_a is None: @@ -114,6 +120,8 @@ def __init__( else: self._thermal_limit_a[:] = thermal_limit_a + self.current_obs_init = None + self.current_obs = None self._init_backend( chronics_handler=_ObsCH(), backend=backend_instanciated, @@ -128,13 +136,13 @@ def __init__( #### # to be able to save and import (using env.generate_classes) correctly self._actionClass = action_helper.subtype - self._observationClass = _complete_action_cls # not used anyway self._complete_action_cls = _complete_action_cls self._action_space = ( action_helper # obs env and env share the same action space ) self._ptr_orig_obs_space = _ptr_orig_obs_space + #### self.no_overflow_disconnection = parameters.NO_OVERFLOW_DISCONNECTION @@ -178,6 +186,8 @@ def _init_backend( if backend is None: self.__unusable = True return + self._actionClass_orig = actionClass + self._observationClass_orig = observationClass self.__unusable = False self._env_dc = self.parameters.ENV_DC @@ -195,19 +205,22 @@ def _init_backend( from grid2op.Observation import ObservationSpace from grid2op.Reward import FlatReward - ob_sp_cls = ObservationSpace.init_grid(type(backend)) + ob_sp_cls = ObservationSpace.init_grid(type(backend), _local_dir_cls=self._local_dir_cls) self._observation_space = ob_sp_cls(type(backend), env=self, with_forecast=False, rewardClass=FlatReward, - _with_obs_env=False) + _with_obs_env=False, + _local_dir_cls=self._local_dir_cls + ) + self._observationClass = self._observation_space.subtype # not used anyway # create the opponent self._create_opponent() # 
create the attention budget self._create_attention_budget() - self._obsClass = observationClass.init_grid(type(self.backend)) + self._obsClass = observationClass.init_grid(type(self.backend), _local_dir_cls=self._local_dir_cls) self._obsClass._INIT_GRID_CLS = observationClass self.current_obs_init = self._obsClass(obs_env=None, action_helper=None) self.current_obs = self.current_obs_init @@ -216,7 +229,7 @@ def _init_backend( self._init_alert_data() # backend has loaded everything - self._hazard_duration = np.zeros(shape=self.n_line, dtype=dt_int) + self._hazard_duration = np.zeros(shape=type(self).n_line, dtype=dt_int) def _do_nothing(self, x): """ @@ -247,7 +260,7 @@ def _update_actions(self): # This "environment" doesn't modify anything return self._do_nothing_act, None - def copy(self): + def copy(self, env=None, new_obs_space=None): """ INTERNAL @@ -263,17 +276,44 @@ def copy(self): if self.__unusable: raise EnvError("Impossible to use a Observation backend with an " "environment that cannot be copied.") - backend = self.backend - self.backend = None - _highres_sim_counter = self._highres_sim_counter - self._highres_sim_counter = None - with warnings.catch_warnings(): - warnings.simplefilter("ignore", FutureWarning) - res = copy.deepcopy(self) - res.backend = backend.copy() - res._highres_sim_counter = _highres_sim_counter - self.backend = backend - self._highres_sim_counter = _highres_sim_counter + + my_cls = type(self) + res = my_cls.__new__(my_cls) + + # fill its attribute + res.__unusable = self.__unusable + res._obsClass = self._obsClass + res._line_status = copy.deepcopy(self._line_status) + res._line_status_me = copy.deepcopy(self._line_status_me) + if env is not None: + # res._ptr_orig_obs_space = env._observation_space # this is not created when this function is called + # so this is why i pass the `new_obs_space` as argument + res._ptr_orig_obs_space = new_obs_space + else: + res._ptr_orig_obs_space = self._ptr_orig_obs_space + 
res.no_overflow_disconnection = self.parameters.NO_OVERFLOW_DISCONNECTION + res._topo_vect = copy.deepcopy(self._topo_vect) + res.is_init = self.is_init + if env is not None: + res._helper_action_env = env._helper_action_env + else: + res._helper_action_env = self._helper_action_env + res._disc_lines = copy.deepcopy(self._disc_lines) + res._highres_sim_counter = self._highres_sim_counter + res._max_episode_duration = self._max_episode_duration + + res.current_obs_init = self._obsClass(obs_env=None, action_helper=None) + res.current_obs_init.reset() + res.current_obs = res.current_obs_init + + # copy attribute of "super" + super()._custom_deepcopy_for_copy(res) + + # finish to initialize res + res.env_modification = res._helper_action_env() + res._do_nothing_act = res._helper_action_env() + res._backend_action_set = res._backend_action_class() + res.current_obs = res.current_obs_init return res def _reset_to_orig_state(self, obs): diff --git a/grid2op/Environment/baseEnv.py b/grid2op/Environment/baseEnv.py index 8dca86a37..440d89e9d 100644 --- a/grid2op/Environment/baseEnv.py +++ b/grid2op/Environment/baseEnv.py @@ -8,13 +8,15 @@ from datetime import datetime -import shutil +import tempfile import logging import time import copy import os import json from typing import Optional, Tuple, Union, Dict, Any, Literal +import importlib +import sys import warnings import numpy as np @@ -299,8 +301,7 @@ def foo(manager): #: this are the keys of the dictionnary `options` #: that can be used when calling `env.reset(..., options={})` - KEYS_RESET_OPTIONS = {"time serie id", "init state"} - + KEYS_RESET_OPTIONS = {"time serie id", "init state", "init ts", "max step"} def __init__( self, @@ -308,6 +309,7 @@ def __init__( init_grid_path: os.PathLike, parameters: Parameters, voltagecontrolerClass: type, + name="unknown", thermal_limit_a: Optional[np.ndarray] = None, epsilon_poly: float = 1e-4, # precision of the redispatching algorithm tol_poly: float = 1e-2, # i need to compute a 
redispatching if the actual values are "more than tol_poly" the values they should be @@ -333,10 +335,30 @@ def __init__( update_obs_after_reward=False, n_busbar=2, _is_test: bool = False, # TODO not implemented !! - _init_obs: Optional[BaseObservation] =None + _init_obs: Optional[BaseObservation] =None, + _local_dir_cls=None, + _read_from_local_dir=None, + _raw_backend_class=None, ): + #: flag to indicate not to erase the directory when the env has been used + self._do_not_erase_local_dir_cls = False GridObjects.__init__(self) RandomObject.__init__(self) + self.name = name + self._local_dir_cls = _local_dir_cls # suppose it's the second path to the environment, so the classes are already in the files + self._read_from_local_dir = _read_from_local_dir + if self._read_from_local_dir is not None: + if os.path.split(self._read_from_local_dir)[1] == "_grid2op_classes": + # legacy behaviour (using experimental_read_from_local_dir kwargs in env.make) + self._do_not_erase_local_dir_cls = True + else: + self._do_not_erase_local_dir_cls = True + + self._actionClass_orig = None + self._observationClass_orig = None + + self._raw_backend_class = _raw_backend_class + self._n_busbar = n_busbar # env attribute not class attribute ! 
if other_rewards is None: other_rewards = {} @@ -389,10 +411,10 @@ def __init__( # class used for the action spaces self._helper_action_class: ActionSpace = None - self._helper_observation_class: ActionSpace = None + self._helper_observation_class: ObservationSpace = None # and calendar data - self.time_stamp: time.struct_time = None + self.time_stamp: time.struct_time = datetime(year=2019, month=1, day=1) self.nb_time_step: datetime.timedelta = dt_int(0) self.delta_time_seconds = None # number of seconds between two consecutive step @@ -622,17 +644,26 @@ def _custom_deepcopy_for_copy(self, new_obj, dict_=None): if self.__closed: raise RuntimeError("Impossible to make a copy of a closed environment !") - if not self.backend._can_be_copied: - raise RuntimeError("Impossible to copy your environment: the backend " - "class you used cannot be copied.") + if hasattr(self.backend, "_can_be_copied"): + if not self.backend._can_be_copied: + # introduced later on, might not be copied perfectly for some older backends + raise RuntimeError("Impossible to copy your environment: the backend " + "class you used cannot be copied.") + # for earlier backend it is not possible to check this so I ignore it. + RandomObject._custom_deepcopy_for_copy(self, new_obj) + new_obj.name = self.name if dict_ is None: dict_ = {} new_obj._n_busbar = self._n_busbar new_obj._init_grid_path = copy.deepcopy(self._init_grid_path) new_obj._init_env_path = copy.deepcopy(self._init_env_path) + new_obj._local_dir_cls = None # copy of a env is not the "main" env. 
TODO + new_obj._do_not_erase_local_dir_cls = self._do_not_erase_local_dir_cls + new_obj._read_from_local_dir = self._read_from_local_dir + new_obj._raw_backend_class = self._raw_backend_class new_obj._DEBUG = self._DEBUG new_obj._parameters = copy.deepcopy(self._parameters) new_obj.with_forecast = self.with_forecast @@ -652,27 +683,23 @@ def _custom_deepcopy_for_copy(self, new_obj, dict_=None): new_obj._tol_poly = self._tol_poly # - new_obj._complete_action_cls = copy.deepcopy(self._complete_action_cls) + new_obj._complete_action_cls = self._complete_action_cls # const # define logger new_obj.logger = copy.deepcopy(self.logger) # TODO does that make any sense ? # class used for the action spaces new_obj._helper_action_class = self._helper_action_class # const - new_obj._helper_observation_class = self._helper_observation_class + new_obj._helper_observation_class = self._helper_observation_class # const # and calendar data new_obj.time_stamp = self.time_stamp new_obj.nb_time_step = self.nb_time_step new_obj.delta_time_seconds = self.delta_time_seconds - # observation - if self.current_obs is not None: - new_obj.current_obs = self.current_obs.copy() - # backend # backend action - new_obj._backend_action_class = self._backend_action_class + new_obj._backend_action_class = self._backend_action_class # const new_obj._backend_action = copy.deepcopy(self._backend_action) # specific to Basic Env, do not change @@ -761,25 +788,29 @@ def _custom_deepcopy_for_copy(self, new_obj, dict_=None): new_obj._rewardClass = self._rewardClass new_obj._actionClass = self._actionClass + new_obj._actionClass_orig = self._actionClass_orig new_obj._observationClass = self._observationClass + new_obj._observationClass_orig = self._observationClass_orig new_obj._legalActClass = self._legalActClass - new_obj._observation_space = self._observation_space.copy(copy_backend=True) - new_obj._observation_space._legal_action = ( - new_obj._game_rules.legal_action - ) # TODO this does not respect SOLID 
principles at all ! - new_obj._kwargs_observation = copy.deepcopy(self._kwargs_observation) - new_obj._observation_space._ptr_kwargs_observation = new_obj._kwargs_observation - new_obj._names_chronics_to_backend = self._names_chronics_to_backend - new_obj._reward_helper = copy.deepcopy(self._reward_helper) - - # gym compatibility - new_obj.reward_range = copy.deepcopy(self.reward_range) - new_obj._viewer = copy.deepcopy(self._viewer) - new_obj.viewer_fig = copy.deepcopy(self.viewer_fig) - + new_obj._names_chronics_to_backend = self._names_chronics_to_backend # cst + # other rewards - new_obj.other_rewards = copy.deepcopy(self.other_rewards) - + new_obj.other_rewards = {k: copy.deepcopy(v) for k, v in self.other_rewards.items()} + for extra_reward in new_obj.other_rewards.values(): + extra_reward.reset(new_obj) + + # voltage + new_obj._voltagecontrolerClass = self._voltagecontrolerClass + if self._voltage_controler is not None: + new_obj._voltage_controler = self._voltage_controler.copy() + else: + new_obj._voltage_controler = None + + # needed for the "Environment.get_kwargs(env, False, False)" (used in the observation_space) + new_obj._attention_budget_cls = self._attention_budget_cls # const + new_obj._kwargs_attention_budget = copy.deepcopy(self._kwargs_attention_budget) + new_obj._has_attention_budget = self._has_attention_budget + # opponent new_obj._opponent_space_type = self._opponent_space_type new_obj._opponent_action_class = self._opponent_action_class # const @@ -796,6 +827,27 @@ def _custom_deepcopy_for_copy(self, new_obj, dict_=None): new_obj._compute_opp_budget = self._opponent_budget_class( self._opponent_action_space ) + + new_obj._observation_bk_class = self._observation_bk_class + new_obj._observation_bk_kwargs = self._observation_bk_kwargs + + # do not copy it. 
+ new_obj._highres_sim_counter = self._highres_sim_counter + + # observation space (might depends on the previous things) + # at this stage the function "Environment.get_kwargs(env, False, False)" should run + new_obj._kwargs_observation = copy.deepcopy(self._kwargs_observation) + new_obj._observation_space = self._observation_space.copy(copy_backend=True, env=new_obj) + new_obj._observation_space._legal_action = ( + new_obj._game_rules.legal_action + ) # TODO this does not respect SOLID principles at all ! + new_obj._observation_space._ptr_kwargs_observation = new_obj._kwargs_observation + new_obj._reward_helper = copy.deepcopy(self._reward_helper) + + # gym compatibility + new_obj.reward_range = copy.deepcopy(self.reward_range) + new_obj._viewer = copy.deepcopy(self._viewer) + new_obj.viewer_fig = copy.deepcopy(self.viewer_fig) # init the opponent new_obj._opponent = new_obj._opponent_class.__new__(new_obj._opponent_class) @@ -809,15 +861,11 @@ def _custom_deepcopy_for_copy(self, new_obj, dict_=None): attack_duration=new_obj._opponent_attack_duration, attack_cooldown=new_obj._opponent_attack_cooldown, budget_per_timestep=new_obj._opponent_budget_per_ts, - opponent=new_obj._opponent, + opponent=new_obj._opponent ) state_me, state_opp = self._oppSpace._get_state() new_obj._oppSpace._set_state(state_me) - - # voltage - new_obj._voltagecontrolerClass = self._voltagecontrolerClass - new_obj._voltage_controler = self._voltage_controler.copy() - + # to change the parameters new_obj.__new_param = copy.deepcopy(self.__new_param) new_obj.__new_forecast_param = copy.deepcopy(self.__new_forecast_param) @@ -841,19 +889,13 @@ def _custom_deepcopy_for_copy(self, new_obj, dict_=None): new_obj._limited_before = copy.deepcopy(self._limited_before) # attention budget - new_obj._has_attention_budget = self._has_attention_budget new_obj._attention_budget = copy.deepcopy(self._attention_budget) - new_obj._attention_budget_cls = self._attention_budget_cls # const 
new_obj._is_alarm_illegal = copy.deepcopy(self._is_alarm_illegal) new_obj._is_alarm_used_in_reward = copy.deepcopy(self._is_alarm_used_in_reward) # alert new_obj._is_alert_illegal = copy.deepcopy(self._is_alert_illegal) new_obj._is_alert_used_in_reward = copy.deepcopy(self._is_alert_used_in_reward) - - new_obj._kwargs_attention_budget = copy.deepcopy(self._kwargs_attention_budget) - - new_obj._last_obs = self._last_obs.copy() new_obj._has_just_been_seeded = self._has_just_been_seeded @@ -866,14 +908,8 @@ def _custom_deepcopy_for_copy(self, new_obj, dict_=None): else: new_obj._init_obs = self._init_obs.copy() - new_obj._observation_bk_class = self._observation_bk_class - new_obj._observation_bk_kwargs = self._observation_bk_kwargs - # do not forget ! - new_obj._is_test = self._is_test - - # do not copy it. - new_obj._highres_sim_counter = self._highres_sim_counter + new_obj._is_test = self._is_test # alert new_obj._last_alert = copy.deepcopy(self._last_alert) @@ -887,6 +923,17 @@ def _custom_deepcopy_for_copy(self, new_obj, dict_=None): new_obj._update_obs_after_reward = copy.deepcopy(self._update_obs_after_reward) + if self._last_obs is not None: + new_obj._last_obs = self._last_obs.copy(env=new_obj) + else: + new_obj._last_obs = None + + # observation + from grid2op.Environment._obsEnv import _ObsEnv + if self.current_obs is not None and not isinstance(self, _ObsEnv): + # breaks for some version of lightsim2grid... (a powerflow need to be run to retrieve the observation) + new_obj.current_obs = new_obj.get_obs() + def get_path_env(self): """ Get the path that allows to create this environment. 
@@ -1227,6 +1274,7 @@ def _create_opponent(self): gridobj=type(self.backend), legal_action=AlwaysLegal, actionClass=self._opponent_action_class, + _local_dir_cls=self._local_dir_cls ) self._compute_opp_budget = self._opponent_budget_class( @@ -1240,6 +1288,7 @@ def _create_opponent(self): attack_cooldown=self._opponent_attack_cooldown, budget_per_timestep=self._opponent_budget_per_ts, opponent=self._opponent, + _local_dir_cls=self._local_dir_cls, ) self._oppSpace.init_opponent(partial_env=self, **self._kwargs_opponent) self._oppSpace.reset() @@ -1249,13 +1298,19 @@ def _init_myclass(self): # the class has already been initialized return # remember the original grid2op class - type(self)._INIT_GRID_CLS = type(self) + orig_cls = type(self) - bk_type = type( - self.backend - ) # be careful here: you need to initialize from the class, and not from the object + # be careful here: you need to initialize from the class, and not from the object + bk_type = type(self.backend) # create the proper environment class for this specific environment - self.__class__ = type(self).init_grid(bk_type) + new_cls = type(self).init_grid(bk_type, _local_dir_cls=self._local_dir_cls) + # assign the right initial grid class + if orig_cls._INIT_GRID_CLS is None: + new_cls._INIT_GRID_CLS = orig_cls + else: + new_cls._INIT_GRID_CLS = orig_cls._INIT_GRID_CLS + + self.__class__ = new_cls def _has_been_initialized(self): # type of power flow to play @@ -1264,7 +1319,7 @@ def _has_been_initialized(self): bk_type = type(self.backend) if np.min([self.n_line, self.n_gen, self.n_load, self.n_sub]) <= 0: raise EnvironmentError("Environment has not been initialized properly") - self._backend_action_class = _BackendAction.init_grid(bk_type) + self._backend_action_class = _BackendAction.init_grid(bk_type, _local_dir_cls=self._local_dir_cls) self._backend_action = self._backend_action_class() # initialize maintenance / hazards @@ -3694,7 +3749,24 @@ def close(self): if hasattr(self, attr_nm): delattr(self, 
attr_nm) setattr(self, attr_nm, None) - + + if self._do_not_erase_local_dir_cls: + # The resources are not held by this env, so + # I do not remove them + # (case for ObsEnv or ForecastedEnv) + return + self._aux_close_local_dir_cls() + + def _aux_close_local_dir_cls(self): + if self._local_dir_cls is not None: + # I am the "keeper" of the temporary directory + # deleting this env should also delete the temporary directory + if not (hasattr(self._local_dir_cls, "_RUNNER_DO_NOT_ERASE") and not self._local_dir_cls._RUNNER_DO_NOT_ERASE): + # BUT if a runner uses it, then I should not delete it ! + self._local_dir_cls.cleanup() + self._local_dir_cls = None + # In this case it's likely that the OS will clean it for grid2op with a warning... + def attach_layout(self, grid_layout): """ Compare to the method of the base class, this one performs a check. @@ -3776,6 +3848,17 @@ def fast_forward_chronics(self, nb_timestep): 00:00). This can lead to suboptimal exploration, as during this phase, only a few time steps are managed by the agent, so in general these few time steps will correspond to grid state around Jan 1st at 00:00. + .. seealso:: + From grid2op version 1.10.3, a similar objective can be + obtained directly by calling :func:`grid2op.Environment.Environment.reset` with `"init ts"` + as option, for example like `obs = env.reset(options={"init ts": 12})` + + + .. danger:: + The usage of both :func:`BaseEnv.fast_forward_chronics` and :func:`Environment.set_max_iter` + is not recommended at all and might not behave correctly. Please use `env.reset` with + `obs = env.reset(options={"max step": xxx, "init ts": yyy})` for a correct behaviour. 
+ Parameters ---------- nb_timestep: ``int`` @@ -3783,7 +3866,20 @@ def fast_forward_chronics(self, nb_timestep): Examples --------- - This can be used like this: + + From grid2op version 1.10.3 we recommend not to use this function (which will be deprecated) + but to use the :func:`grid2op.Environment.Environment.reset` functon with the `"init ts"` + option. + + .. code-block:: python + + import grid2op + env_name = "l2rpn_case14_sandbox" + env = grid2op.make(env_name) + + obs = env.reset(options={"init ts": 123}) + + For the legacy usave, this can be used like this: .. code-block:: python @@ -3934,30 +4030,84 @@ def change_reward(self, new_reward_func): ) self.__new_reward_func = new_reward_func - def _aux_gen_classes(self, cls, sys_path): - if not isinstance(cls, type): - raise RuntimeError(f"cls should be a type and not an object !: {cls}") - if not issubclass(cls, GridObjects): - raise RuntimeError(f"cls should inherit from GridObjects: {cls}") + @staticmethod + def _aux_gen_classes(cls_other, sys_path, _add_class_output=False): + if not isinstance(cls_other, type): + raise RuntimeError(f"cls_other should be a type and not an object !: {cls_other}") + if not issubclass(cls_other, GridObjects): + raise RuntimeError(f"cls_other should inherit from GridObjects: {cls_other}") from pathlib import Path - path_env = cls._PATH_GRID_CLASSES - cls._PATH_GRID_CLASSES = str(Path(self.get_path_env()).as_posix()) + path_env = cls_other._PATH_GRID_CLASSES + # cls_other._PATH_GRID_CLASSES = str(Path(self.get_path_env()).as_posix()) + cls_other._PATH_GRID_CLASSES = str(Path(sys_path).as_posix()) - res = cls._get_full_cls_str() - cls._PATH_GRID_CLASSES = path_env - output_file = os.path.join(sys_path, f"{cls.__name__}_file.py") + res = cls_other._get_full_cls_str() + cls_other._PATH_GRID_CLASSES = path_env + output_file = os.path.join(sys_path, f"{cls_other.__name__}_file.py") if not os.path.exists(output_file): # if the file is not already saved, i save it and add it to the 
__init__ file with open(output_file, "w", encoding="utf-8") as f: f.write(res) - return f"\nfrom .{cls.__name__}_file import {cls.__name__}" + str_import = f"\nfrom .{cls_other.__name__}_file import {cls_other.__name__}" else: # if the file exists, I check it's the same - # from grid2op.MakeEnv.UpdateEnv import _aux_hash_file, _aux_update_hash_text - # hash_saved = _aux_hash_file(output_file) - # my_hash = _aux_update_hash_text(res) - return "" + from grid2op.MakeEnv.UpdateEnv import _aux_hash_file, _aux_update_hash_text + hash_saved = _aux_hash_file(output_file) + my_hash = _aux_update_hash_text(res) + if hash_saved.hexdigest() != my_hash.hexdigest(): + raise EnvError(f"It appears some classes have been modified between what was saved on the hard drive " + f"and the current state of the grid. This should not have happened. " + f"Check class {cls_other.__name__}") + str_import = None + if not _add_class_output: + return str_import + + # NB: these imports needs to be consistent with what is done in + # griobj.init_grid(...) 
+ package_path, nm_ = os.path.split(output_file) + nm_, ext = os.path.splitext(nm_) + sub_repo, tmp_nm = os.path.split(package_path) + if sub_repo not in sys.path: + sys.path.append(sub_repo) + + sub_repo_mod = None + if tmp_nm == "_grid2op_classes": + # legacy "experimental_read_from_local_dir" + # issue was the module "_grid2op_classes" had the same name + # regardless of the environment, so grid2op was "confused" + path_init = os.path.join(sub_repo, "__init__.py") + if not os.path.exists(path_init): + try: + with open(path_init, "w", encoding='utf-8') as f: + f.write("# DO NOT REMOVE, automatically generated by grid2op") + except FileExistsError: + pass + env_path, env_nm = os.path.split(sub_repo) + if env_path not in sys.path: + sys.path.append(env_path) + if not package_path in sys.path: + sys.path.append(package_path) + super_supermodule = importlib.import_module(env_nm) + nm_ = f"{tmp_nm}.{nm_}" + tmp_nm = env_nm + super_module = importlib.import_module(tmp_nm, package=sub_repo_mod) + add_sys_path = os.path.dirname(super_module.__file__) + if not add_sys_path in sys.path: + sys.path.append(add_sys_path) + + if f"{tmp_nm}.{nm_}" in sys.modules: + cls_res = getattr(sys.modules[f"{tmp_nm}.{nm_}"], cls_other.__name__) + return str_import, cls_res + try: + module = importlib.import_module(f".{nm_}", package=tmp_nm) + except ModuleNotFoundError as exc_: + # invalidate the cache and reload the package in this case + importlib.invalidate_caches() + importlib.reload(super_module) + module = importlib.import_module(f".{nm_}", package=tmp_nm) + cls_res = getattr(module, cls_other.__name__) + return str_import, cls_res def generate_classes(self, *, local_dir_id=None, _guard=None, _is_base_env__=True, sys_path=None): """ @@ -4027,7 +4177,6 @@ def generate_classes(self, *, local_dir_id=None, _guard=None, _is_base_env__=Tru if self.__closed: return - # create the folder if _guard is not None: raise RuntimeError("use `env.generate_classes()` with no arguments !") @@ 
-4048,42 +4197,79 @@ def generate_classes(self, *, local_dir_id=None, _guard=None, _is_base_env__=Tru sys_path = os.path.join(self.get_path_env(), "_grid2op_classes", local_dir_id) else: sys_path = os.path.join(self.get_path_env(), "_grid2op_classes") - + if _is_base_env__: if os.path.exists(sys_path): shutil.rmtree(sys_path) os.mkdir(sys_path) + + with open(os.path.join(sys_path, "__init__.py"), "w", encoding="utf-8") as f: + f.write(BASE_TXT_COPYRIGHT) # initialized the "__init__" file _init_txt = "" - mode = "w" - if not _is_base_env__: - _init_txt = BASE_TXT_COPYRIGHT + _init_txt - else: - # i am apppending to the __init__ file in case of obs_env - mode = "a" + mode = "a" # generate the classes - _init_txt += self._aux_gen_classes(type(self), sys_path) - _init_txt += self._aux_gen_classes(type(self.backend), sys_path) - _init_txt += self._aux_gen_classes( - self.backend._complete_action_class, sys_path + # for the environment + txt_ = self._aux_gen_classes(type(self), sys_path) + if txt_ is not None: + _init_txt += txt_ + + # for the forecast env (we do this even if it's not used) + from grid2op.Environment._forecast_env import _ForecastEnv + for_env_cls = _ForecastEnv.init_grid(type(self.backend), _local_dir_cls=self._local_dir_cls) + txt_ = self._aux_gen_classes(for_env_cls, sys_path, _add_class_output=False) + if txt_ is not None: + _init_txt += txt_ + + # for the backend + txt_, cls_res_bk = self._aux_gen_classes(type(self.backend), sys_path, _add_class_output=True) + if txt_ is not None: + _init_txt += txt_ + old_bk_cls = self.backend.__class__ + self.backend.__class__ = cls_res_bk + txt_, cls_res_complete_act = self._aux_gen_classes( + old_bk_cls._complete_action_class, sys_path, _add_class_output=True ) - _init_txt += self._aux_gen_classes(self._backend_action_class, sys_path) - _init_txt += self._aux_gen_classes(type(self.action_space), sys_path) - _init_txt += self._aux_gen_classes(self._actionClass, sys_path) - _init_txt += 
self._aux_gen_classes(self._complete_action_cls, sys_path) - _init_txt += self._aux_gen_classes(type(self.observation_space), sys_path) - _init_txt += self._aux_gen_classes(self._observationClass, sys_path) - _init_txt += self._aux_gen_classes( + if txt_ is not None: + _init_txt += txt_ + self.backend.__class__._complete_action_class = cls_res_complete_act + txt_, cls_res_bk_act = self._aux_gen_classes(self._backend_action_class, sys_path, _add_class_output=True) + if txt_ is not None: + _init_txt += txt_ + self._backend_action_class = cls_res_bk_act + self.backend.__class__.my_bk_act_class = cls_res_bk_act + + # for the other class + txt_ = self._aux_gen_classes(type(self.action_space), sys_path) + if txt_ is not None: + _init_txt += txt_ + txt_ = self._aux_gen_classes(self._actionClass, sys_path) + if txt_ is not None: + _init_txt += txt_ + txt_ = self._aux_gen_classes(self._complete_action_cls, sys_path) + if txt_ is not None: + _init_txt += txt_ + txt_ = self._aux_gen_classes(type(self.observation_space), sys_path) + if txt_ is not None: + _init_txt += txt_ + txt_ = self._aux_gen_classes(self._observationClass, sys_path) + if txt_ is not None: + _init_txt += txt_ + txt_ = self._aux_gen_classes( self._opponent_action_space.subtype, sys_path ) + if txt_ is not None: + _init_txt += txt_ # now do the same for the obs_env if _is_base_env__: - _init_txt += self._aux_gen_classes( + txt_ = self._aux_gen_classes( self._voltage_controler.action_space.subtype, sys_path ) + if txt_ is not None: + _init_txt += txt_ init_grid_tmp = self._observation_space.obs_env._init_grid_path self._observation_space.obs_env._init_grid_path = self._init_grid_path @@ -4096,50 +4282,6 @@ def generate_classes(self, *, local_dir_id=None, _guard=None, _is_base_env__=Tru _init_txt += "\n" with open(os.path.join(sys_path, "__init__.py"), mode, encoding="utf-8") as f: f.write(_init_txt) - - def _forget_classes(self): - """ - This function allows python to "forget" the classes created at the 
initialization of the environment. - - It should not be used in most cases and is reserved for internal use only. - - .. versionadded: 1.10.2 - Function added following the new behaviour introduced in this version. - - """ - from grid2op.MakeEnv.PathUtils import USE_CLASS_IN_FILE - if not USE_CLASS_IN_FILE: - return - pass - - def remove_all_class_folders(self): - """ - This function allows python to remove all the files containing all the classes - in the environment. - - .. warning:: - If you have pending grid2op "job" using this environment, they will most likely crash - so use with extra care ! - - It should not be used in most cases and is reserved for internal use only. - - .. versionadded: 1.10.2 - Function added following the new behaviour introduced in this version. - - """ - directory_path = os.path.join(self.get_path_env(), "_grid2op_classes") - try: - with os.scandir(directory_path) as entries: - for entry in entries: - try: - if entry.is_file(): - os.unlink(entry.path) - else: - shutil.rmtree(entry.path) - except (OSError, FileNotFoundError): - pass - except OSError: - pass def __del__(self): """when the environment is garbage collected, free all the memory, including cross reference to itself in the observation space.""" @@ -4298,4 +4440,17 @@ def _check_rules_correct(legalActClass): 'grid2op.BaseRules class, type provided is "{}"'.format( type(legalActClass) ) - ) \ No newline at end of file + ) + + def classes_are_in_files(self) -> bool: + """ + + Whether the classes created when this environment has been made are + store on the hard drive (will return `True`) or not. + + .. info:: + This will become the default behaviour in future grid2op versions. + + See :ref:`troubleshoot_pickle` for more information. 
+ """ + return self._read_from_local_dir is not None diff --git a/grid2op/Environment/baseMultiProcessEnv.py b/grid2op/Environment/baseMultiProcessEnv.py index 0f76ca9d9..b2e7aecdc 100644 --- a/grid2op/Environment/baseMultiProcessEnv.py +++ b/grid2op/Environment/baseMultiProcessEnv.py @@ -325,6 +325,7 @@ def __init__(self, envs, obs_as_class=True, return_info=True, logger=None): self.obs_as_class = obs_as_class # self.__return_info = return_info self._waiting = True + self._read_from_local_dir = env._read_from_local_dir def _send_act(self, actions): for remote, action in zip(self._remotes, actions): diff --git a/grid2op/Environment/environment.py b/grid2op/Environment/environment.py index 113b20482..97b5d0a2e 100644 --- a/grid2op/Environment/environment.py +++ b/grid2op/Environment/environment.py @@ -10,7 +10,7 @@ import warnings import numpy as np import re -from typing import Optional, Union, Any, Dict, Literal +from typing import Optional, Union, Literal import grid2op from grid2op.Opponent import OpponentSpace @@ -33,7 +33,8 @@ from grid2op.Opponent import BaseOpponent, NeverAttackBudget from grid2op.operator_attention import LinearAttentionBudget from grid2op.Space import DEFAULT_N_BUSBAR_PER_SUB -from grid2op.typing_variables import RESET_OPTIONS_TYPING +from grid2op.typing_variables import RESET_OPTIONS_TYPING, N_BUSBAR_PER_SUB_TYPING +from grid2op.MakeEnv.PathUtils import USE_CLASS_IN_FILE class Environment(BaseEnv): @@ -84,7 +85,7 @@ def __init__( backend, parameters, name="unknown", - n_busbar=DEFAULT_N_BUSBAR_PER_SUB, + n_busbar : N_BUSBAR_PER_SUB_TYPING=DEFAULT_N_BUSBAR_PER_SUB, names_chronics_to_backend=None, actionClass=TopologyAction, observationClass=CompleteObservation, @@ -117,9 +118,11 @@ def __init__( _init_obs=None, _raw_backend_class=None, _compat_glop_version=None, - _read_from_local_dir=True, + _read_from_local_dir=None, _is_test=False, _allow_loaded_backend=False, + _local_dir_cls=None, # only set at the first call to `make(...)` after 
should be false + _overload_name_multimix=None, ): BaseEnv.__init__( self, @@ -152,17 +155,35 @@ def __init__( observation_bk_kwargs=observation_bk_kwargs, highres_sim_counter=highres_sim_counter, update_obs_after_reward=_update_obs_after_reward, - n_busbar=n_busbar, + n_busbar=n_busbar, # TODO n_busbar_per_sub different num per substations: read from a config file maybe (if not provided by the user) + name=name, + _raw_backend_class=_raw_backend_class if _raw_backend_class is not None else type(backend), _init_obs=_init_obs, _is_test=_is_test, # is this created with "test=True" # TODO not implemented !! + _local_dir_cls=_local_dir_cls, + _read_from_local_dir=_read_from_local_dir, ) + if name == "unknown": warnings.warn( 'It is NOT recommended to create an environment without "make" and EVEN LESS ' "to use an environment without a name..." ) - self.name = name - self._read_from_local_dir = _read_from_local_dir + + if _overload_name_multimix is not None: + # this means that the "make" call is issued from the + # creation of a MultiMix. + # So I use the base name instead. 
+ self.name = "".join(_overload_name_multimix[2:]) + self.multimix_mix_name = name + self._overload_name_multimix = _overload_name_multimix + else: + self.name = name + self._overload_name_multimix = None + self.multimix_mix_name = None + # to remember if the user specified a "max_iter" at some point + self._max_iter = chronics_handler.max_iter # for all episode, set in the chronics_handler or by a call to `env.set_max_iter` + self._max_step = None # for the current episode #: starting grid2Op 1.11 classes are stored on the disk when an environment is created #: so the "environment" is created twice (one to generate the class and then correctly to load them) @@ -174,13 +195,11 @@ def __init__( self.metadata = None self.spec = None - if _raw_backend_class is None: - self._raw_backend_class = type(backend) - else: - self._raw_backend_class = _raw_backend_class - self._compat_glop_version = _compat_glop_version + # needs to be done before "_init_backend" otherwise observationClass is not defined in the + # observation space (real_env_kwargs) + self._observationClass_orig = observationClass # for plotting self._init_backend( chronics_handler, @@ -191,8 +210,6 @@ def __init__( rewardClass, legalActClass, ) - self._actionClass_orig = actionClass - self._observationClass_orig = observationClass def _init_backend( self, @@ -240,8 +257,9 @@ def _init_backend( "Impossible to use the same backend twice. Please create your environment with a " "new backend instance (new object)." ) - - need_process_backend = False + self._actionClass_orig = actionClass + + need_process_backend = False if not self.backend.is_loaded: if hasattr(self.backend, "init_pp_backend") and self.backend.init_pp_backend is not None: # hack for lightsim2grid ... 
@@ -254,7 +272,8 @@ def _init_backend( # example if self._read_from_local_dir is not None: # test to support pickle conveniently - self.backend._PATH_GRID_CLASSES = self.get_path_env() + # type(self.backend)._PATH_GRID_CLASSES = self.get_path_env() + self.backend._PATH_GRID_CLASSES = self._read_from_local_dir # all the above should be done in this exact order, otherwise some weird behaviour might occur # this is due to the class attribute type(self.backend).set_env_name(self.name) @@ -285,7 +304,8 @@ def _init_backend( self.load_alert_data() # to force the initialization of the backend to the proper type - self.backend.assert_grid_correct() + self.backend.assert_grid_correct( + _local_dir_cls=self._local_dir_cls) self.backend.is_loaded = True need_process_backend = True @@ -341,24 +361,26 @@ def _init_backend( # be careful here: you need to initialize from the class, and not from the object bk_type = type(self.backend) self._rewardClass = rewardClass - self._actionClass = actionClass.init_grid(gridobj=bk_type) + self._actionClass = actionClass.init_grid(gridobj=bk_type, _local_dir_cls=self._local_dir_cls) self._actionClass._add_shunt_data() self._actionClass._update_value_set() - self._observationClass = observationClass.init_grid(gridobj=bk_type) + self._observationClass = observationClass.init_grid(gridobj=bk_type, _local_dir_cls=self._local_dir_cls) - self._complete_action_cls = CompleteAction.init_grid(gridobj=bk_type) + self._complete_action_cls = CompleteAction.init_grid(gridobj=bk_type, _local_dir_cls=self._local_dir_cls) - self._helper_action_class = ActionSpace.init_grid(gridobj=bk_type) + self._helper_action_class = ActionSpace.init_grid(gridobj=bk_type, _local_dir_cls=self._local_dir_cls) self._action_space = self._helper_action_class( gridobj=bk_type, actionClass=actionClass, legal_action=self._game_rules.legal_action, + _local_dir_cls=self._local_dir_cls ) # action that affect the grid made by the environment. 
self._helper_action_env = self._helper_action_class( gridobj=bk_type, actionClass=CompleteAction, legal_action=self._game_rules.legal_action, + _local_dir_cls=self._local_dir_cls, ) # handles input data @@ -387,7 +409,7 @@ def _init_backend( # this needs to be done after the chronics handler: rewards might need information # about the chronics to work properly. - self._helper_observation_class = ObservationSpace.init_grid(gridobj=bk_type) + self._helper_observation_class = ObservationSpace.init_grid(gridobj=bk_type, _local_dir_cls=self._local_dir_cls) # FYI: this try to copy the backend if it fails it will modify the backend # and the environment to force the deactivation of the # forecasts @@ -399,7 +421,8 @@ def _init_backend( env=self, kwargs_observation=self._kwargs_observation, observation_bk_class=self._observation_bk_class, - observation_bk_kwargs=self._observation_bk_kwargs + observation_bk_kwargs=self._observation_bk_kwargs, + _local_dir_cls=self._local_dir_cls ) # test to make sure the backend is consistent with the chronics generator @@ -422,6 +445,7 @@ def _init_backend( gridobj=bk_type, controler_backend=self.backend, actionSpace_cls=self._helper_action_class, + _local_dir_cls=self._local_dir_cls ) # create the opponent @@ -440,7 +464,17 @@ def _init_backend( self._reset_redispatching() self._reward_to_obs = {} do_nothing = self._helper_action_env({}) + + # needs to be done at the end, but before the first "step" is called + self._observation_space.set_real_env_kwargs(self) + + # see issue https://github.com/rte-france/Grid2Op/issues/617 + # thermal limits are set AFTER this initial step + _no_overflow_disconnection = self._no_overflow_disconnection + self._no_overflow_disconnection = True *_, fail_to_start, info = self.step(do_nothing) + self._no_overflow_disconnection = _no_overflow_disconnection + if fail_to_start: raise Grid2OpException( "Impossible to initialize the powergrid, the powerflow diverge at iteration 0. 
" @@ -481,7 +515,7 @@ def _init_backend( # reset everything to be consistent self._reset_vectors_and_timings() - + def max_episode_duration(self): """ Return the maximum duration (in number of steps) of the current episode. @@ -492,20 +526,97 @@ def max_episode_duration(self): to the maximum 32 bit integer (usually `2147483647`) """ + if self._max_step is not None: + return self._max_step tmp = dt_int(self.chronics_handler.max_episode_duration()) if tmp < 0: tmp = dt_int(np.iinfo(dt_int).max) return tmp + def _aux_check_max_iter(self, max_iter): + try: + max_iter_int = int(max_iter) + except ValueError as exc_: + raise EnvError("Impossible to set 'max_iter' by providing something that is not an integer.") from exc_ + if max_iter_int != max_iter: + raise EnvError("Impossible to set 'max_iter' by providing something that is not an integer.") + if max_iter_int < 1 and max_iter_int != -1: + raise EnvError("'max_iter' should be an int >= 1 or -1") + return max_iter_int + def set_max_iter(self, max_iter): """ - + Set the maximum duration of an episode for all the next episodes. + + .. seealso:: + The option `max step` when calling the :func:`Environment.reset` function + used like `obs = env.reset(options={"max step": 288})` (see examples of + `env.reset` for more information) + + .. note:: + The real maximum duration of a duration depends on this parameter but also on the + size of the time series used. For example, if you use an environment with + time series lasting 8064 steps and you call `env.set_max_iter(9000)` + the maximum number of iteration will still be 8064. + + .. warning:: + It only has an impact on future episode. Said differently it also has an impact AFTER + `env.reset` has been called. + + .. danger:: + The usage of both :func:`BaseEnv.fast_forward_chronics` and :func:`Environment.set_max_iter` + is not recommended at all and might not behave correctly. 
Please use `env.reset` with + `obs = env.reset(options={"max step": xxx, "init ts": yyy})` for a correct behaviour. + Parameters ---------- max_iter: ``int`` - The maximum number of iteration you can do before reaching the end of the episode. Set it to "-1" for + The maximum number of iterations you can do before reaching the end of the episode. Set it to "-1" for possibly infinite episode duration. + + Examples + -------- + It can be used like this: + + .. code-block:: python + + import grid2op + env_name = "l2rpn_case14_sandbox" + + env = grid2op.make(env_name) + + obs = env.reset() + obs.max_step == 8064 # default for this environment + + env.set_max_iter(288) + # no impact here + + obs = env.reset() + obs.max_step == 288 + + # the limitation still applies to the next episode + obs = env.reset() + obs.max_step == 288 + + If you want to "unset" your limitation, you can do: + + .. code-block:: python + + env.set_max_iter(-1) + obs = env.reset() + obs.max_step == 8064 + + Finally, you cannot limit it to something larger than the duration + of the time series of the environment: + + .. 
code-block:: python + + env.set_max_iter(9000) + obs = env.reset() + obs.max_step == 8064 + # the call to env.set_max_iter has no impact here + Notes ------- @@ -513,7 +624,9 @@ def set_max_iter(self, max_iter): more information """ - self.chronics_handler.set_max_iter(max_iter) + max_iter_int = self._aux_check_max_iter(max_iter) + self._max_iter = max_iter_int + self.chronics_handler._set_max_iter(max_iter_int) @property def _helper_observation(self): @@ -825,9 +938,9 @@ def reset_grid(self, """ self.backend.reset( - self._init_grid_path + self._init_grid_path, ) # the real powergrid of the environment - self.backend.assert_grid_correct() + # self.backend.assert_grid_correct() if self._thermal_limit_a is not None: self.backend.set_thermal_limit(self._thermal_limit_a.astype(dt_float)) @@ -892,6 +1005,18 @@ def add_text_logger(self, logger=None): self.logger = logger return self + def _aux_get_skip_ts(self, options): + skip_ts = None + if options is not None and "init ts" in options: + try: + skip_ts = int(options["init ts"]) + except ValueError as exc_: + raise Grid2OpException("In `env.reset` the kwargs `init ts` should be convertible to an int") from exc_ + + if skip_ts != options["init ts"]: + raise Grid2OpException(f"In `env.reset` the kwargs `init ts` should be convertible to an int, found {options['init ts']}") + return skip_ts + def reset(self, *, seed: Union[int, None] = None, @@ -913,7 +1038,11 @@ def reset(self, options: dict Some options to "customize" the reset call. For example specifying the "time serie id" (grid2op >= 1.9.8) to use - or the "initial state of the grid" (grid2op >= 1.10.2). See examples for more information about this. Ignored if + or the "initial state of the grid" (grid2op >= 1.10.2) or to + start the episode at some specific time in the time series (grid2op >= 1.10.3) with the + "init ts" key. + + See examples for more information about this. Ignored if not set. 
Examples @@ -1035,13 +1164,113 @@ def reset(self, init_state_dict = {"set_line_status": [(0, -1)], "method": "force"} obs = env.reset(options={"init state": init_state_dict}) obs.line_status[0] is False + + .. versionadded:: 1.10.3 + + Another feature has been added in version 1.10.3, the possibility to skip the + some steps of the time series and starts at some given steps. + + The time series often always start at a given day of the week (*eg* Monday) + and at a given time (*eg* midnight). But for some reason you notice that your + agent performs poorly on other day of the week or time of the day. This might be + because it has seen much more data from Monday at midnight that from any other + day and hour of the day. + + To alleviate this issue, you can now easily reset an episode and ask grid2op + to start this episode after xxx steps have "passed". + + Concretely, you can do it with: + + .. code-block:: python + + import grid2op + env_name = "l2rpn_case14_sandbox" + env = grid2op.make(env_name) + + obs = env.reset(options={"init ts": 1}) + + Doing that your agent will start its episode not at midnight (which + is the case for this environment), but at 00:05 + + If you do: + + .. code-block:: python + + obs = env.reset(options={"init ts": 12}) + + In this case, you start the episode at 01:00 and not at midnight (you + start at what would have been the 12th steps) + + If you want to start the "next day", you can do: + + .. code-block:: python + + obs = env.reset(options={"init ts": 288}) + + etc. + + .. note:: + On this feature, if a powerline is on soft overflow (meaning its flow is above + the limit but below the :attr:`grid2op.Parameters.Parameters.HARD_OVERFLOW_THRESHOLD` * `the limit`) + then it is still connected (of course) and the counter + :attr:`grid2op.Observation.BaseObservation.timestep_overflow` is at 0. 
+ + If a powerline is on "hard overflow" (meaning its flow would be above + :attr:`grid2op.Parameters.Parameters.HARD_OVERFLOW_THRESHOLD` * `the limit`), then, as it is + the case for a "normal" (without options) reset, this line is disconnected, but can be reconnected + directly (:attr:`grid2op.Observation.BaseObservation.time_before_cooldown_line` == 0) + + .. seealso:: + The function :func:`Environment.fast_forward_chronics` for an alternative usage (that will be + deprecated at some point) + + Yet another feature has been added in grid2op version 1.10.3 in this `env.reset` function. It is + the capacity to limit the duration of an episode. + + .. code-block:: python + + import grid2op + env_name = "l2rpn_case14_sandbox" + env = grid2op.make(env_name) + + obs = env.reset(options={"max step": 288}) + + This will limit the duration to 288 steps (1 day), meaning your agent + will have successfully managed the entire episode if it manages to keep + the grid in a safe state for a whole day (depending on the environment you are + using the default duration is either one week - roughly 2016 steps or 4 weeks) + + .. note:: + This option only affect the current episode. It will have no impact on the + next episode (after reset) + + For example: + + .. code-block:: python + + obs = env.reset() + obs.max_step == 8064 # default for this environment + + obs = env.reset(options={"max step": 288}) + obs.max_step == 288 # specified by the option + + obs = env.reset() + obs.max_step == 8064 # retrieve the default behaviour + + .. seealso:: + The function :func:`Environment.set_max_iter` for an alternative usage with the different + that `set_max_iter` is permenanent: it impacts all the future episodes and not only + the next one. 
""" # process the "options" kwargs # (if there is an init state then I need to process it to remove the # some keys) + self._max_step = None method = "combine" init_state = None + skip_ts = self._aux_get_skip_ts(options) + max_iter_int = None if options is not None and "init state" in options: act_as_dict = options["init state"] if isinstance(act_as_dict, dict): @@ -1061,7 +1290,18 @@ def reset(self, init_state.remove_change() super().reset(seed=seed, options=options) - + + if options is not None and "max step" in options: + # use the "max iter" provided in the options + max_iter_int = self._aux_check_max_iter(options["max step"]) + if skip_ts is not None: + max_iter_chron = max_iter_int + skip_ts + else: + max_iter_chron = max_iter_int + self.chronics_handler._set_max_iter(max_iter_chron) + else: + # reset previous max iter to value set with `env.set_max_iter(...)` (or -1 by default) + self.chronics_handler._set_max_iter(self._max_iter) self.chronics_handler.next_chronics() self.chronics_handler.initialize( self.backend.name_load, @@ -1070,6 +1310,10 @@ def reset(self, self.backend.name_sub, names_chronics_to_backend=self._names_chronics_to_backend, ) + if max_iter_int is not None: + self._max_step = min(max_iter_int, self.chronics_handler.real_data.max_iter - (skip_ts if skip_ts is not None else 0)) + else: + self._max_step = None self._env_modification = None self._reset_maintenance() self._reset_redispatching() @@ -1079,6 +1323,20 @@ def reset(self, if self.viewer_fig is not None: del self.viewer_fig self.viewer_fig = None + + if skip_ts is not None: + self._reset_vectors_and_timings() + + if skip_ts < 1: + raise Grid2OpException(f"In `env.reset` the kwargs `init ts` should be an int >= 1, found {options['init ts']}") + if skip_ts == 1: + self._init_obs = None + self.step(self.action_space()) + elif skip_ts == 2: + self.fast_forward_chronics(1) + else: + self.fast_forward_chronics(skip_ts) + # if True, then it will not disconnect lines above their thermal 
limits self._reset_vectors_and_timings() # and it needs to be done AFTER to have proper timings at tbe beginning # the attention budget is reset above @@ -1157,17 +1415,15 @@ def render(self, mode="rgb_array"): return rgb_array def _custom_deepcopy_for_copy(self, new_obj): - super()._custom_deepcopy_for_copy(new_obj) - - new_obj.name = self.name - new_obj._read_from_local_dir = self._read_from_local_dir new_obj.metadata = copy.deepcopy(self.metadata) new_obj.spec = copy.deepcopy(self.spec) - new_obj._raw_backend_class = self._raw_backend_class new_obj._compat_glop_version = self._compat_glop_version - new_obj._actionClass_orig = self._actionClass_orig - new_obj._observationClass_orig = self._observationClass_orig + new_obj._max_iter = self._max_iter + new_obj._max_step = self._max_step + new_obj._overload_name_multimix = self._overload_name_multimix + new_obj.multimix_mix_name = self.multimix_mix_name + super()._custom_deepcopy_for_copy(new_obj) def copy(self) -> "Environment": """ @@ -1861,6 +2117,7 @@ def get_params_for_runner(self): res["envClass"] = Environment # TODO ! 
res["gridStateclass"] = self.chronics_handler.chronicsClass res["backendClass"] = self._raw_backend_class + res["_overload_name_multimix"] = self._overload_name_multimix if hasattr(self.backend, "_my_kwargs"): res["backend_kwargs"] = self.backend._my_kwargs else: @@ -1900,6 +2157,7 @@ def get_params_for_runner(self): res["kwargs_attention_budget"] = copy.deepcopy(self._kwargs_attention_budget) res["has_attention_budget"] = self._has_attention_budget res["_read_from_local_dir"] = self._read_from_local_dir + res["_local_dir_cls"] = self._local_dir_cls # should be transfered to the runner so that folder is not deleted while runner exists res["logger"] = self.logger res["kwargs_observation"] = copy.deepcopy(self._kwargs_observation) res["observation_bk_class"] = self._observation_bk_class @@ -1943,7 +2201,10 @@ def init_obj_from_kwargs(cls, observation_bk_kwargs, _raw_backend_class, _read_from_local_dir, - n_busbar=DEFAULT_N_BUSBAR_PER_SUB): + _local_dir_cls, + _overload_name_multimix, + n_busbar=DEFAULT_N_BUSBAR_PER_SUB + ): res = cls(init_env_path=init_env_path, init_grid_path=init_grid_path, chronics_handler=chronics_handler, @@ -1976,7 +2237,9 @@ def init_obj_from_kwargs(cls, observation_bk_kwargs=observation_bk_kwargs, n_busbar=int(n_busbar), _raw_backend_class=_raw_backend_class, - _read_from_local_dir=_read_from_local_dir) + _read_from_local_dir=_read_from_local_dir, + _local_dir_cls=_local_dir_cls, + _overload_name_multimix=_overload_name_multimix) return res def generate_data(self, nb_year=1, nb_core=1, seed=None, **kwargs): @@ -2052,3 +2315,20 @@ def generate_data(self, nb_year=1, nb_core=1, seed=None, **kwargs): env=self, seed=seed, nb_scenario=nb_year, nb_core=nb_core, **kwargs ) + + def _add_classes_in_files(self, sys_path, bk_type, are_classes_in_files): + if are_classes_in_files: + # then generate the proper classes + _PATH_GRID_CLASSES = bk_type._PATH_GRID_CLASSES + try: + bk_type._PATH_GRID_CLASSES = None + my_type_tmp = 
type(self).init_grid(gridobj=bk_type, _local_dir_cls=None) + txt_, cls_res_me = self._aux_gen_classes(my_type_tmp, + sys_path, + _add_class_output=True) + # then add the class to the init file + with open(os.path.join(sys_path, "__init__.py"), "a", encoding="utf-8") as f: + f.write(txt_) + finally: + # make sure to put back the correct _PATH_GRID_CLASSES + bk_type._PATH_GRID_CLASSES = _PATH_GRID_CLASSES diff --git a/grid2op/Environment/maskedEnvironment.py b/grid2op/Environment/maskedEnvironment.py index bd7caaffa..e3c55a7d9 100644 --- a/grid2op/Environment/maskedEnvironment.py +++ b/grid2op/Environment/maskedEnvironment.py @@ -8,14 +8,17 @@ import copy import numpy as np +import os from typing import Tuple, Union, List + from grid2op.Environment.environment import Environment from grid2op.Exceptions import EnvError from grid2op.dtypes import dt_bool, dt_float, dt_int from grid2op.Space import DEFAULT_N_BUSBAR_PER_SUB +from grid2op.MakeEnv.PathUtils import USE_CLASS_IN_FILE -class MaskedEnvironment(Environment): # TODO heritage ou alors on met un truc de base +class MaskedEnvironment(Environment): """This class is the grid2op implementation of a "maked" environment: lines not in the `lines_of_interest` mask will NOT be deactivated by the environment is the flow is too high (or moderately high for too long.) @@ -25,6 +28,29 @@ class MaskedEnvironment(Environment): # TODO heritage ou alors on met un truc d .. warning:: At time of writing, the behaviour of "obs.simulate" is not modified + + Examples + --------- + + We recommend you build such an environment with: + + .. 
code-block:: python + + import grid2op + from grid2op.Environment import MaskedEnvironment + + env_name = "l2rpn_case14_sandbox" + lines_of_interest = np.array([True, True, True, True, True, True, + False, False, False, False, False, False, + False, False, False, False, False, False, + False, False]) + env = MaskedEnvironment(grid2op.make(env_name), + lines_of_interest=lines_of_interest) + + + In particular, make sure to use `grid2op.make(...)` when creating the MaskedEnvironment + and not to use another environment. + """ # some kind of infinity value # NB we multiply np.finfo(dt_float).max by a small number (1e-7) to avoid overflow @@ -40,20 +66,32 @@ def __init__(self, self._lines_of_interest = self._make_lines_of_interest(lines_of_interest) if isinstance(grid2op_env, Environment): - super().__init__(**grid2op_env.get_kwargs()) + kwargs = grid2op_env.get_kwargs() + if grid2op_env.classes_are_in_files(): + # I need to build the classes + + # first take the "ownership" of the tmp directory + kwargs["_local_dir_cls"] = grid2op_env._local_dir_cls + grid2op_env._local_dir_cls = None + + # then generate the proper classes + sys_path = os.path.abspath(kwargs["_local_dir_cls"].name) + bk_type = type(grid2op_env.backend) + self._add_classes_in_files(sys_path, bk_type, grid2op_env.classes_are_in_files()) + super().__init__(**kwargs) elif isinstance(grid2op_env, dict): super().__init__(**grid2op_env) else: raise EnvError(f"For MaskedEnvironment you need to provide " f"either an Environment or a dict " f"for grid2op_env. You provided: {type(grid2op_env)}") + # if self._lines_of_interest.size() != type(self).n_line: + # raise EnvError("Impossible to init A masked environment when the number of lines " + # "of the mask do not match the number of lines on the grid.") def _make_lines_of_interest(self, lines_of_interest): # NB is called BEFORE the env has been created... 
if isinstance(lines_of_interest, np.ndarray): - # if lines_of_interest.size() != type(self).n_line: - # raise EnvError("Impossible to init A masked environment when the number of lines " - # "of the mask do not match the number of lines on the grid.") res = lines_of_interest.astype(dt_bool) if res.sum() == 0: raise EnvError("You cannot use MaskedEnvironment and masking all " @@ -89,6 +127,7 @@ def _custom_deepcopy_for_copy(self, new_obj): @classmethod def init_obj_from_kwargs(cls, + *, other_env_kwargs, init_env_path, init_grid_path, @@ -122,39 +161,49 @@ def init_obj_from_kwargs(cls, observation_bk_kwargs, _raw_backend_class, _read_from_local_dir, + _overload_name_multimix, + _local_dir_cls, n_busbar=DEFAULT_N_BUSBAR_PER_SUB): - res = MaskedEnvironment(grid2op_env={"init_env_path": init_env_path, - "init_grid_path": init_grid_path, - "chronics_handler": chronics_handler, - "backend": backend, - "parameters": parameters, - "name": name, - "names_chronics_to_backend": names_chronics_to_backend, - "actionClass": actionClass, - "observationClass": observationClass, - "rewardClass": rewardClass, - "legalActClass": legalActClass, - "voltagecontrolerClass": voltagecontrolerClass, - "other_rewards": other_rewards, - "opponent_space_type": opponent_space_type, - "opponent_action_class": opponent_action_class, - "opponent_class": opponent_class, - "opponent_init_budget": opponent_init_budget, - "opponent_budget_per_ts": opponent_budget_per_ts, - "opponent_budget_class": opponent_budget_class, - "opponent_attack_duration": opponent_attack_duration, - "opponent_attack_cooldown": opponent_attack_cooldown, - "kwargs_opponent": kwargs_opponent, - "with_forecast": with_forecast, - "attention_budget_cls": attention_budget_cls, - "kwargs_attention_budget": kwargs_attention_budget, - "has_attention_budget": has_attention_budget, - "logger": logger, - "kwargs_observation": kwargs_observation, - "observation_bk_class": observation_bk_class, - "observation_bk_kwargs": 
observation_bk_kwargs, - "n_busbar": int(n_busbar), - "_raw_backend_class": _raw_backend_class, - "_read_from_local_dir": _read_from_local_dir}, - **other_env_kwargs) + grid2op_env = {"init_env_path": init_env_path, + "init_grid_path": init_grid_path, + "chronics_handler": chronics_handler, + "backend": backend, + "parameters": parameters, + "name": name, + "names_chronics_to_backend": names_chronics_to_backend, + "actionClass": actionClass, + "observationClass": observationClass, + "rewardClass": rewardClass, + "legalActClass": legalActClass, + "voltagecontrolerClass": voltagecontrolerClass, + "other_rewards": other_rewards, + "opponent_space_type": opponent_space_type, + "opponent_action_class": opponent_action_class, + "opponent_class": opponent_class, + "opponent_init_budget": opponent_init_budget, + "opponent_budget_per_ts": opponent_budget_per_ts, + "opponent_budget_class": opponent_budget_class, + "opponent_attack_duration": opponent_attack_duration, + "opponent_attack_cooldown": opponent_attack_cooldown, + "kwargs_opponent": kwargs_opponent, + "with_forecast": with_forecast, + "attention_budget_cls": attention_budget_cls, + "kwargs_attention_budget": kwargs_attention_budget, + "has_attention_budget": has_attention_budget, + "logger": logger, + "kwargs_observation": kwargs_observation, + "observation_bk_class": observation_bk_class, + "observation_bk_kwargs": observation_bk_kwargs, + "n_busbar": int(n_busbar), + "_raw_backend_class": _raw_backend_class, + "_read_from_local_dir": _read_from_local_dir, + "_local_dir_cls": _local_dir_cls, + "_overload_name_multimix": _overload_name_multimix} + if not "lines_of_interest" in other_env_kwargs: + raise EnvError("You cannot make a MaskedEnvironment without providing the list of lines of interest") + for el in other_env_kwargs: + if el == "lines_of_interest": + continue + warnings.warn(f"kwargs {el} provided to make the environment will be ignored") + res = MaskedEnvironment(grid2op_env, 
lines_of_interest=other_env_kwargs["lines_of_interest"]) return res diff --git a/grid2op/Environment/multiMixEnv.py b/grid2op/Environment/multiMixEnv.py index e6ba1a646..be2508478 100644 --- a/grid2op/Environment/multiMixEnv.py +++ b/grid2op/Environment/multiMixEnv.py @@ -16,6 +16,9 @@ from grid2op.Space import GridObjects, RandomObject, DEFAULT_N_BUSBAR_PER_SUB from grid2op.Exceptions import EnvError, Grid2OpException from grid2op.Observation import BaseObservation +from grid2op.MakeEnv.PathUtils import USE_CLASS_IN_FILE +from grid2op.Environment.baseEnv import BaseEnv +from grid2op.typing_variables import STEP_INFO_TYPING, RESET_OPTIONS_TYPING class MultiMixEnvironment(GridObjects, RandomObject): @@ -154,13 +157,13 @@ class MultiMixEnvironment(GridObjects, RandomObject): """ - KEYS_RESET_OPTIONS = {"time serie id"} + KEYS_RESET_OPTIONS = BaseEnv.KEYS_RESET_OPTIONS def __init__( self, envs_dir, logger=None, - experimental_read_from_local_dir=False, + experimental_read_from_local_dir=None, n_busbar=DEFAULT_N_BUSBAR_PER_SUB, _add_to_name="", # internal, for test only, do not use ! _compat_glop_version=None, # internal, for test only, do not use ! @@ -174,6 +177,10 @@ def __init__( self.mix_envs = [] self._env_dir = os.path.abspath(envs_dir) self.__closed = False + self._do_not_erase_local_dir_cls = False + self._local_dir_cls = None + if not os.path.exists(envs_dir): + raise EnvError(f"There is nothing at {envs_dir}") # Special case handling for backend # TODO: with backend.copy() instead ! 
backendClass = None @@ -184,76 +191,170 @@ def __init__( # was introduced in grid2op 1.7.1 backend_kwargs = kwargs["backend"]._my_kwargs del kwargs["backend"] - - # Inline import to prevent cyclical import - from grid2op.MakeEnv.Make import make - + + li_mix_nms = [mix_name for mix_name in sorted(os.listdir(envs_dir)) if os.path.isdir(os.path.join(envs_dir, mix_name))] + if not li_mix_nms: + raise EnvError("We did not find any mix in this multi-mix environment.") + + # Make sure GridObject class attributes are set from first env + # Should be fine since the grid is the same for all envs + multi_env_name = (None, envs_dir, os.path.basename(os.path.abspath(envs_dir)), _add_to_name) + env_for_init = self._aux_create_a_mix(envs_dir, + li_mix_nms[0], + logger, + backendClass, + backend_kwargs, + _add_to_name, + _compat_glop_version, + n_busbar, + _test, + experimental_read_from_local_dir, + multi_env_name, + kwargs) + + cls_res_me = self._aux_add_class_file(env_for_init) + if cls_res_me is not None: + self.__class__ = cls_res_me + else: + self.__class__ = type(self).init_grid(type(env_for_init.backend), _local_dir_cls=env_for_init._local_dir_cls) + self.mix_envs.append(env_for_init) + self._local_dir_cls = env_for_init._local_dir_cls + # TODO reuse same observation_space and action_space in all the envs maybe ? 
+ multi_env_name = (type(env_for_init)._PATH_GRID_CLASSES, *multi_env_name[1:]) try: - for env_dir in sorted(os.listdir(envs_dir)): - env_path = os.path.join(envs_dir, env_dir) - if not os.path.isdir(env_path): + for mix_name in li_mix_nms[1:]: + mix_path = os.path.join(envs_dir, mix_name) + if not os.path.isdir(mix_path): continue - this_logger = ( - logger.getChild(f"MultiMixEnvironment_{env_dir}") - if logger is not None - else None - ) - # Special case for backend - if backendClass is not None: - try: - # should pass with grid2op >= 1.7.1 - bk = backendClass(**backend_kwargs) - except TypeError as exc_: - # with grid2Op version prior to 1.7.1 - # you might have trouble with - # "TypeError: __init__() got an unexpected keyword argument 'can_be_copied'" - msg_ = ("Impossible to create a backend for each mix using the " - "backend key-word arguments. Falling back to creating " - "with no argument at all (default behaviour with grid2op <= 1.7.0).") - warnings.warn(msg_) - bk = backendClass() - env = make( - env_path, - backend=bk, - _add_to_name=_add_to_name, - _compat_glop_version=_compat_glop_version, - n_busbar=n_busbar, - test=_test, - logger=this_logger, - experimental_read_from_local_dir=experimental_read_from_local_dir, - **kwargs, - ) - else: - env = make( - env_path, - n_busbar=n_busbar, - _add_to_name=_add_to_name, - _compat_glop_version=_compat_glop_version, - test=_test, - logger=this_logger, - experimental_read_from_local_dir=experimental_read_from_local_dir, - **kwargs, - ) - self.mix_envs.append(env) + mix = self._aux_create_a_mix(envs_dir, + mix_name, + logger, + backendClass, + backend_kwargs, + _add_to_name, + _compat_glop_version, + n_busbar, + _test, + experimental_read_from_local_dir, + multi_env_name, + kwargs) + self.mix_envs.append(mix) except Exception as exc_: - err_msg = "MultiMix environment creation failed: {}".format(exc_) - raise EnvError(err_msg) + err_msg = "MultiMix environment creation failed at the creation of the first mix. 
Error: {}".format(exc_) + raise EnvError(err_msg) from exc_ if len(self.mix_envs) == 0: err_msg = "MultiMix envs_dir did not contain any valid env" raise EnvError(err_msg) + # tell every mix the "MultiMix" is responsible for deleting the + # folder that stores the classes definition + for el in self.mix_envs: + el._do_not_erase_local_dir_cls = True self.env_index = 0 self.current_env = self.mix_envs[self.env_index] - # Make sure GridObject class attributes are set from first env - # Should be fine since the grid is the same for all envs - multi_env_name = os.path.basename(os.path.abspath(envs_dir)) + _add_to_name - save_env_name = self.current_env.env_name - self.current_env.env_name = multi_env_name - self.__class__ = self.init_grid(self.current_env) - self.current_env.env_name = save_env_name + # legacy behaviour (using experimental_read_from_local_dir kwargs in env.make) + if self._read_from_local_dir is not None: + if os.path.split(self._read_from_local_dir)[1] == "_grid2op_classes": + self._do_not_erase_local_dir_cls = True + else: + self._do_not_erase_local_dir_cls = True + + def _aux_aux_add_class_file(self, sys_path, env_for_init): + # used for the old behaviour (setting experimental_read_from_local_dir=True in make) + bk_type = type(env_for_init.backend) + _PATH_GRID_CLASSES = bk_type._PATH_GRID_CLASSES + cls_res_me = None + try: + bk_type._PATH_GRID_CLASSES = None + my_type_tmp = MultiMixEnvironment.init_grid(gridobj=bk_type, _local_dir_cls=None) + txt_, cls_res_me = BaseEnv._aux_gen_classes(my_type_tmp, + sys_path, + _add_class_output=True) + # then add the class to the init file + with open(os.path.join(sys_path, "__init__.py"), "a", encoding="utf-8") as f: + f.write(txt_) + finally: + # make sure to put back the correct _PATH_GRID_CLASSES + bk_type._PATH_GRID_CLASSES = _PATH_GRID_CLASSES + return cls_res_me + + def _aux_add_class_file(self, env_for_init): + # used for the "new" bahviour for grid2op make (automatic read from local dir) + if 
env_for_init.classes_are_in_files() and env_for_init._local_dir_cls is not None: + sys_path = os.path.abspath(env_for_init._local_dir_cls.name) + self._local_dir_cls = env_for_init._local_dir_cls + env_for_init._local_dir_cls = None + # then generate the proper classes + cls_res_me = self._aux_aux_add_class_file(sys_path, env_for_init) + return cls_res_me + return None + + def _aux_create_a_mix(self, + envs_dir, + mix_name, + logger, + backendClass, + backend_kwargs, + _add_to_name, + _compat_glop_version, + n_busbar, + _test, + experimental_read_from_local_dir, + multi_env_name, + kwargs + ): + # Inline import to prevent cyclical import + from grid2op.MakeEnv.Make import make + + this_logger = ( + logger.getChild(f"MultiMixEnvironment_{mix_name}") + if logger is not None + else None + ) + mix_path = os.path.join(envs_dir, mix_name) + # Special case for backend + if backendClass is not None: + try: + # should pass with grid2op >= 1.7.1 + bk = backendClass(**backend_kwargs) + except TypeError as exc_: + # with grid2Op version prior to 1.7.1 + # you might have trouble with + # "TypeError: __init__() got an unexpected keyword argument 'can_be_copied'" + msg_ = ("Impossible to create a backend for each mix using the " + "backend key-word arguments. 
Falling back to creating " + "with no argument at all (default behaviour with grid2op <= 1.7.0).") + warnings.warn(msg_) + bk = backendClass() + mix = make( + mix_path, + backend=bk, + _add_to_name=_add_to_name, + _compat_glop_version=_compat_glop_version, + n_busbar=n_busbar, + test=_test, + logger=this_logger, + experimental_read_from_local_dir=experimental_read_from_local_dir, + _overload_name_multimix=multi_env_name, + **kwargs, + ) + else: + mix = make( + mix_path, + n_busbar=n_busbar, + _add_to_name=_add_to_name, + _compat_glop_version=_compat_glop_version, + test=_test, + logger=this_logger, + experimental_read_from_local_dir=experimental_read_from_local_dir, + _overload_name_multimix=multi_env_name, + **kwargs, + ) + return mix + def get_path_env(self): """ Get the path that allows to create this environment. @@ -304,11 +405,13 @@ def __next__(self): def __getattr__(self, name): # TODO what if name is an integer ? make it possible to loop with integer here + if self.__closed: + raise EnvError("This environment is closed, you cannot use it.") return getattr(self.current_env, name) def keys(self): for mix in self.mix_envs: - yield mix.name + yield mix.multimix_mix_name def values(self): for mix in self.mix_envs: @@ -316,7 +419,7 @@ def values(self): def items(self): for mix in self.mix_envs: - yield mix.name, mix + yield mix.multimix_mix_name, mix def copy(self): if self.__closed: @@ -326,6 +429,11 @@ def copy(self): current_env = self.current_env self.current_env = None + # do not copy these attributes + _local_dir_cls = self._local_dir_cls + self._local_dir_cls = None + + # create the new object and copy the normal attribute cls = self.__class__ res = cls.__new__(cls) for k in self.__dict__: @@ -333,11 +441,17 @@ def copy(self): # this is handled elsewhere continue setattr(res, k, copy.deepcopy(getattr(self, k))) + # now deal with the mixes res.mix_envs = [mix.copy() for mix in mix_envs] res.current_env = res.mix_envs[res.env_index] - + # finally deal with 
the ownership of the class folder + res._local_dir_cls = _local_dir_cls + res._do_not_erase_local_dir_cls = True + + # put back attributes of `self` that have been put aside self.mix_envs = mix_envs self.current_env = current_env + self._local_dir_cls = _local_dir_cls return res def __getitem__(self, key): @@ -360,7 +474,7 @@ def __getitem__(self, key): raise EnvError("This environment is closed, you cannot use it.") # Search for key for mix in self.mix_envs: - if mix.name == key: + if mix.multimix_mix_name == key: return mix # Not found by name @@ -370,7 +484,7 @@ def reset(self, *, seed: Union[int, None] = None, random=False, - options: Union[Dict[Union[str, Literal["time serie id"]], Union[int, str]], None] = None) -> BaseObservation: + options: RESET_OPTIONS_TYPING = None) -> BaseObservation: if self.__closed: raise EnvError("This environment is closed, you cannot use it.") @@ -389,13 +503,7 @@ def reset(self, self.env_index = (self.env_index + 1) % len(self.mix_envs) self.current_env = self.mix_envs[self.env_index] - - if options is not None and "time serie id" in options: - self.set_id(options["time serie id"]) - - if seed is not None: - self.seed(seed) - return self.current_env.reset() + return self.current_env.reset(seed=seed, options=options) def seed(self, seed=None): """ @@ -490,7 +598,17 @@ def close(self): for mix in self.mix_envs: mix.close() + self.__closed = True + + # free the resources (temporary directory) + if self._do_not_erase_local_dir_cls: + # The resources are not held by this env, so + # I do not remove them + # (case for ObsEnv or ForecastedEnv) + return + BaseEnv._aux_close_local_dir_cls(self) + def attach_layout(self, grid_layout): if self.__closed: @@ -504,7 +622,12 @@ def __del__(self): self.close() def generate_classes(self): - # TODO this is not really a good idea, as the multi-mix itself is not read from the - # files ! 
- for mix in self.mix_envs: - mix.generate_classes() + mix_for_classes = self.mix_envs[0] + path_cls = os.path.join(mix_for_classes.get_path_env(), "_grid2op_classes") + if not os.path.exists(path_cls): + try: + os.mkdir(path_cls) + except FileExistsError: + pass + mix_for_classes.generate_classes() + self._aux_aux_add_class_file(path_cls, mix_for_classes) diff --git a/grid2op/Environment/timedOutEnv.py b/grid2op/Environment/timedOutEnv.py index 2b7c16d85..a1952f99a 100644 --- a/grid2op/Environment/timedOutEnv.py +++ b/grid2op/Environment/timedOutEnv.py @@ -9,12 +9,14 @@ import time from math import floor from typing import Any, Dict, Tuple, Union, List, Literal - +import os + from grid2op.Environment.environment import Environment from grid2op.Action import BaseAction from grid2op.Observation import BaseObservation from grid2op.Exceptions import EnvError from grid2op.Space import DEFAULT_N_BUSBAR_PER_SUB +from grid2op.MakeEnv.PathUtils import USE_CLASS_IN_FILE class TimedOutEnvironment(Environment): # TODO heritage ou alors on met un truc de base @@ -71,7 +73,19 @@ def __init__(self, self._nb_dn_last = 0 self._is_init_dn = False if isinstance(grid2op_env, Environment): - super().__init__(**grid2op_env.get_kwargs()) + kwargs = grid2op_env.get_kwargs() + if grid2op_env.classes_are_in_files(): + # I need to build the classes + + # first take the "ownership" of the tmp directory + kwargs["_local_dir_cls"] = grid2op_env._local_dir_cls + grid2op_env._local_dir_cls = None + + # then generate the proper classes + sys_path = os.path.abspath(kwargs["_local_dir_cls"].name) + bk_type = type(grid2op_env.backend) + self._add_classes_in_files(sys_path, bk_type, grid2op_env.classes_are_in_files()) + super().__init__(**kwargs) elif isinstance(grid2op_env, dict): super().__init__(**grid2op_env) else: @@ -182,6 +196,7 @@ def get_params_for_runner(self): @classmethod def init_obj_from_kwargs(cls, + *, other_env_kwargs, init_env_path, init_grid_path, @@ -215,41 +230,51 @@ def 
init_obj_from_kwargs(cls, observation_bk_kwargs, _raw_backend_class, _read_from_local_dir, + _local_dir_cls, + _overload_name_multimix, n_busbar=DEFAULT_N_BUSBAR_PER_SUB): - res = TimedOutEnvironment(grid2op_env={"init_env_path": init_env_path, - "init_grid_path": init_grid_path, - "chronics_handler": chronics_handler, - "backend": backend, - "parameters": parameters, - "name": name, - "names_chronics_to_backend": names_chronics_to_backend, - "actionClass": actionClass, - "observationClass": observationClass, - "rewardClass": rewardClass, - "legalActClass": legalActClass, - "voltagecontrolerClass": voltagecontrolerClass, - "other_rewards": other_rewards, - "opponent_space_type": opponent_space_type, - "opponent_action_class": opponent_action_class, - "opponent_class": opponent_class, - "opponent_init_budget": opponent_init_budget, - "opponent_budget_per_ts": opponent_budget_per_ts, - "opponent_budget_class": opponent_budget_class, - "opponent_attack_duration": opponent_attack_duration, - "opponent_attack_cooldown": opponent_attack_cooldown, - "kwargs_opponent": kwargs_opponent, - "with_forecast": with_forecast, - "attention_budget_cls": attention_budget_cls, - "kwargs_attention_budget": kwargs_attention_budget, - "has_attention_budget": has_attention_budget, - "logger": logger, - "kwargs_observation": kwargs_observation, - "observation_bk_class": observation_bk_class, - "observation_bk_kwargs": observation_bk_kwargs, - "_raw_backend_class": _raw_backend_class, - "_read_from_local_dir": _read_from_local_dir, - "n_busbar": int(n_busbar)}, - **other_env_kwargs) + grid2op_env={"init_env_path": init_env_path, + "init_grid_path": init_grid_path, + "chronics_handler": chronics_handler, + "backend": backend, + "parameters": parameters, + "name": name, + "names_chronics_to_backend": names_chronics_to_backend, + "actionClass": actionClass, + "observationClass": observationClass, + "rewardClass": rewardClass, + "legalActClass": legalActClass, + "voltagecontrolerClass": 
voltagecontrolerClass, + "other_rewards": other_rewards, + "opponent_space_type": opponent_space_type, + "opponent_action_class": opponent_action_class, + "opponent_class": opponent_class, + "opponent_init_budget": opponent_init_budget, + "opponent_budget_per_ts": opponent_budget_per_ts, + "opponent_budget_class": opponent_budget_class, + "opponent_attack_duration": opponent_attack_duration, + "opponent_attack_cooldown": opponent_attack_cooldown, + "kwargs_opponent": kwargs_opponent, + "with_forecast": with_forecast, + "attention_budget_cls": attention_budget_cls, + "kwargs_attention_budget": kwargs_attention_budget, + "has_attention_budget": has_attention_budget, + "logger": logger, + "kwargs_observation": kwargs_observation, + "observation_bk_class": observation_bk_class, + "observation_bk_kwargs": observation_bk_kwargs, + "_raw_backend_class": _raw_backend_class, + "_read_from_local_dir": _read_from_local_dir, + "n_busbar": int(n_busbar), + "_local_dir_cls": _local_dir_cls, + "_overload_name_multimix": _overload_name_multimix} + if not "time_out_ms" in other_env_kwargs: + raise EnvError("You cannot make a MaskedEnvironment without providing the list of lines of interest") + for el in other_env_kwargs: + if el == "time_out_ms": + continue + warnings.warn(f"kwargs {el} provided to make the environment will be ignored") + res = TimedOutEnvironment(grid2op_env, time_out_ms=other_env_kwargs["time_out_ms"]) return res diff --git a/grid2op/Episode/CompactEpisodeData.py b/grid2op/Episode/CompactEpisodeData.py index 30a138311..e5cdabf9d 100644 --- a/grid2op/Episode/CompactEpisodeData.py +++ b/grid2op/Episode/CompactEpisodeData.py @@ -300,3 +300,17 @@ def list_episode(path): def __len__(self): return self.game_over_timestep + + def make_serializable(self): + """ + INTERNAL + + .. 
warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\ + Used by the runner to serialize properly an episode + + Called in the _aux_run_one_episode (one of the Runner auxiliary functions) to make + sure the EpisodeData can be sent back to the main process without issue (otherwise + there is a complaint about the _ObsEnv) + """ + from grid2op.Episode.EpisodeData import EpisodeData + EpisodeData._aux_make_obs_space_serializable(self) diff --git a/grid2op/Episode/EpisodeData.py b/grid2op/Episode/EpisodeData.py index e06ac7325..1925fd7ba 100644 --- a/grid2op/Episode/EpisodeData.py +++ b/grid2op/Episode/EpisodeData.py @@ -800,6 +800,48 @@ def to_disk(self): dict_ = {"version": f"{grid2op.__version__}"} json.dump(obj=dict_, fp=f, indent=4, sort_keys=True) + def _aux_make_obs_space_serializable(self): + """I put it here because it's also used by CompactEpisodeData. + + The only requirement is that `self` has an attribute `observation_space` which is a + valid grid2op ObservationSpace""" + if self.observation_space is None: + return + from grid2op.Environment._obsEnv import _ObsEnv + # remove the observation_env of the observation_space + self.observation_space = self.observation_space.copy(copy_backend=True) + self.observation_space._backend_obs.close() + self.observation_space._backend_obs = None + self.observation_space.obs_env.close() + self.observation_space.obs_env = None + self.observation_space._ObsEnv_class = _ObsEnv + self.observation_space._real_env_kwargs = None + self.observation_space._template_obj._obs_env = None + self.observation_space._template_obj._ptr_kwargs_env = None + self.observation_space._empty_obs._obs_env = None + self.observation_space._empty_obs._ptr_kwargs_env = None + self.observation_space._deactivate_simulate(None) + + def make_serializable(self): + """ + INTERNAL + + .. 
warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\ + Used by the runner to serialize properly an episode + + Called in the _aux_run_one_episode (one of the Runner auxiliary functions) to make + sure the EpisodeData can be sent back to the main process without issue (otherwise + there is a complaint about the _ObsEnv) + """ + self._aux_make_obs_space_serializable() + # remove the observation_env of the observation + for el in self.observations.objects: + if el is not None: + el._obs_env = None + el._ptr_kwargs_env = None + + self.observations.helper = self.observation_space + @staticmethod def get_grid2op_version(path_episode): """ diff --git a/grid2op/MakeEnv/Make.py b/grid2op/MakeEnv/Make.py index 4692c6743..15bd5b6c3 100644 --- a/grid2op/MakeEnv/Make.py +++ b/grid2op/MakeEnv/Make.py @@ -250,12 +250,14 @@ def _aux_make_multimix( n_busbar=2, _add_to_name="", _compat_glop_version=None, + _overload_name_multimix=None, logger=None, **kwargs ) -> Environment: # Local import to prevent imports loop from grid2op.Environment import MultiMixEnvironment - + if _overload_name_multimix is not None: + raise RuntimeError("You should not create a MultiMix with `_overload_name_multimix`.") return MultiMixEnvironment( dataset_path, experimental_read_from_local_dir=experimental_read_from_local_dir, @@ -268,6 +270,15 @@ def _aux_make_multimix( ) + +def _get_path_multimix(_overload_name_multimix) -> str: + baseenv_path, multi_mix_name, add_to_name = _overload_name_multimix + if os.path.exists(baseenv_path): + return baseenv_path + if multi_mix_name in TEST_DEV_ENVS: + return TEST_DEV_ENVS[multi_mix_name] + raise Grid2OpException(f"Unknown multimix environment with name {multi_mix_name} that should be located at {baseenv_path}.") + + def make( dataset : Union[str, os.PathLike], *, @@ -277,6 +288,7 @@ def make( n_busbar=2, _add_to_name : str="", _compat_glop_version : Optional[str]=None, + _overload_name_multimix : Optional[str]=None, # do not use ! 
**kwargs ) -> Environment: """ @@ -327,6 +339,9 @@ def make( _compat_glop_version: Internal, do not use (and can only be used when setting "test=True") + + _overload_name_multimix: + Internal, do not use ! Returns ------- @@ -419,6 +434,7 @@ def make_from_path_fn_(*args, **kwargs): dataset_path=dataset, _add_to_name=_add_to_name_tmp, _compat_glop_version=_compat_glop_version_tmp, + _overload_name_multimix=_overload_name_multimix, n_busbar=n_busbar, **kwargs ) @@ -430,7 +446,7 @@ def make_from_path_fn_(*args, **kwargs): ) # Unknown dev env - if test and dataset_name not in TEST_DEV_ENVS: + if _overload_name_multimix is None and test and dataset_name not in TEST_DEV_ENVS: raise Grid2OpException(_MAKE_UNKNOWN_ENV.format(dataset)) # Known test env and test flag enabled @@ -443,7 +459,13 @@ def make_from_path_fn_(*args, **kwargs): or dataset_name.startswith("educ") ): warnings.warn(_MAKE_DEV_ENV_DEPRECATED_WARN.format(dataset_name)) - ds_path = TEST_DEV_ENVS[dataset_name] + if _overload_name_multimix: + # make is invoked from a Multimix + path_multimix = _get_path_multimix(_overload_name_multimix) + ds_path = os.path.join(path_multimix, dataset_name) + else: + # normal behaviour + ds_path = TEST_DEV_ENVS[dataset_name] # Check if multimix from path if _aux_is_multimix(ds_path): @@ -463,6 +485,7 @@ def make_from_path_fn_(*args, **kwargs): _add_to_name=_add_to_name, _compat_glop_version=_compat_glop_version, experimental_read_from_local_dir=experimental_read_from_local_dir, + _overload_name_multimix=_overload_name_multimix, **kwargs ) @@ -475,6 +498,7 @@ def make_from_path_fn_(*args, **kwargs): logger=logger, n_busbar=n_busbar, experimental_read_from_local_dir=experimental_read_from_local_dir, + _overload_name_multimix=_overload_name_multimix, **kwargs ) @@ -494,5 +518,6 @@ def make_from_path_fn_(*args, **kwargs): logger=logger, n_busbar=n_busbar, experimental_read_from_local_dir=experimental_read_from_local_dir, + _overload_name_multimix=_overload_name_multimix, **kwargs 
) diff --git a/grid2op/MakeEnv/MakeFromPath.py b/grid2op/MakeEnv/MakeFromPath.py index 81f31d218..ff85d56f7 100644 --- a/grid2op/MakeEnv/MakeFromPath.py +++ b/grid2op/MakeEnv/MakeFromPath.py @@ -8,6 +8,7 @@ import os import time +import copy import importlib.util import numpy as np import json @@ -34,6 +35,8 @@ from grid2op.operator_attention import LinearAttentionBudget from grid2op.MakeEnv.get_default_aux import _get_default_aux +from grid2op.MakeEnv.PathUtils import _aux_fix_backend_internal_classes + DIFFICULTY_NAME = "difficulty" CHALLENGE_NAME = "competition" @@ -93,7 +96,9 @@ "obs.simulate and obs.get_forecasted_env). If provided, this should " "be a type / class and not an instance of this class. (by default it's None)"), "observation_backend_kwargs": ("key-word arguments to build the observation backend (used for Simulator, " - " obs.simulate and obs.get_forecasted_env). This should be a dictionnary. (by default it's None)") + " obs.simulate and obs.get_forecasted_env). This should be a dictionnary. 
(by default it's None)"), + "class_in_file": ("experimental: tell grid2op to store the classes generated in the hard drive " + "which can solve lots of pickle / multi processing related issue"), } NAME_CHRONICS_FOLDER = "chronics" @@ -124,6 +129,7 @@ def make_from_dataset_path( n_busbar=2, _add_to_name="", _compat_glop_version=None, + _overload_name_multimix=None, **kwargs, ) -> Environment: """ @@ -873,7 +879,15 @@ def make_from_dataset_path( # new in 1.10.2 : allow_loaded_backend = False classes_path = None - if USE_CLASS_IN_FILE: + init_env = None + this_local_dir = None + use_class_in_files = USE_CLASS_IN_FILE + if "class_in_file" in kwargs: + classes_in_file_kwargs = bool(kwargs["class_in_file"]) + use_class_in_files = classes_in_file_kwargs + + if use_class_in_files: + # new behaviour sys_path = os.path.join(os.path.split(grid_path_abs)[0], "_grid2op_classes") if not os.path.exists(sys_path): try: @@ -881,39 +895,38 @@ def make_from_dataset_path( except FileExistsError: # if another process created it, no problem pass + init_nm = os.path.join(sys_path, "__init__.py") + if not os.path.exists(init_nm): + try: + with open(init_nm, "w", encoding="utf-8") as f: + f.write("This file has been created by grid2op in a `env.make(...)` call. Do not modify it or remove it") + except FileExistsError: + pass - # TODO: automatic delete the directory if needed - - # TODO: check the "new" path works - - # TODO: in the BaseEnv.generate_classes make sure the classes are added to the "__init__" if the file is created - # TODO: make that only if backend can be copied ! 
+ import tempfile + this_local_dir = tempfile.TemporaryDirectory(dir=sys_path) + if experimental_read_from_local_dir: + warnings.warn("With the automatic class generation, we removed the possibility to " + "set `experimental_read_from_local_dir` to True.") + experimental_read_from_local_dir = False # TODO: check the hash thingy is working in baseEnv._aux_gen_classes (currently a pdb) - # TODO: check that previous behaviour is working correctly - - # TODO: create again the environment with the proper "read from local_dir" - # TODO check that it works if the backend changes, if shunt / no_shunt if name of env changes etc. # TODO: what if it cannot write on disk => fallback to previous behaviour + data_feeding_fake = copy.deepcopy(data_feeding) + data_feeding_fake.cleanup_action_space() - # TODO: allow for a way to disable that (with env variable or config in grid2op) - # TODO: keep only one environment that will delete the files (with a flag in its constructor) - - # TODO: explain in doc new behaviour with regards to "class in file" - - # TODO: basic CI for this "new" mode - - # TODO: use the tempfile.TemporaryDirectory() to hold the classes, and in the (real) env copy, runner , env.get_kwargs() - # or whatever - # reference this "tempfile.TemporaryDirectory()" which will be deleted automatically - # when every "pointer" to it are deleted, this sounds more reasonable - if not experimental_read_from_local_dir: - init_env = Environment(init_env_path=os.path.abspath(dataset_path), + # Set graph layout if not None and not an empty dict + if graph_layout is not None and graph_layout: + type(backend).attach_layout(graph_layout) + + if not os.path.exists(this_local_dir.name): + raise EnvError(f"Path {this_local_dir.name} has not been created by the tempfile package") + init_env = Environment(init_env_path=os.path.abspath(dataset_path), init_grid_path=grid_path_abs, - chronics_handler=data_feeding, + chronics_handler=data_feeding_fake, backend=backend, parameters=param, 
name=name_env + _add_to_name, @@ -937,28 +950,39 @@ def make_from_dataset_path( attention_budget_cls=attention_budget_class, kwargs_attention_budget=kwargs_attention_budget, logger=logger, - n_busbar=n_busbar, + n_busbar=n_busbar, # TODO n_busbar_per_sub different num per substations: read from a config file maybe (if not provided by the user) _compat_glop_version=_compat_glop_version, _read_from_local_dir=None, # first environment to generate the classes and save them + _local_dir_cls=None, + _overload_name_multimix=_overload_name_multimix, kwargs_observation=kwargs_observation, observation_bk_class=observation_backend_class, - observation_bk_kwargs=observation_backend_kwargs, - ) - this_local_dir = f"{time.time()}_{os.getpid()}" - init_env.generate_classes(local_dir_id=this_local_dir) - init_env.backend = None # to avoid to close the backend when init_env is deleted - classes_path = os.path.join(sys_path, this_local_dir) - # to force the reading back of the classes from the hard drive - init_env._forget_classes() # TODO not implemented - init_env.close() - else: - classes_path = sys_path + observation_bk_kwargs=observation_backend_kwargs + ) + if not os.path.exists(this_local_dir.name): + raise EnvError(f"Path {this_local_dir.name} has not been created by the tempfile package") + init_env.generate_classes(local_dir_id=this_local_dir.name) + # fix `my_bk_act_class` and `_complete_action_class` + _aux_fix_backend_internal_classes(type(backend), this_local_dir) + init_env.backend = None # to avoid to close the backend when init_env is deleted + init_env._local_dir_cls = None + classes_path = this_local_dir.name allow_loaded_backend = True else: # legacy behaviour (<= 1.10.1 behaviour) classes_path = None if not experimental_read_from_local_dir else experimental_read_from_local_dir if experimental_read_from_local_dir: - sys_path = os.path.join(os.path.split(grid_path_abs)[0], "_grid2op_classes") + if _overload_name_multimix is not None: + # I am in a multimix + if 
_overload_name_multimix[0] is None: + # first mix: path is correct + sys_path = os.path.join(os.path.split(grid_path_abs)[0], "_grid2op_classes") + else: + # other mixes I need to retrieve the properties of the first mix + sys_path = _overload_name_multimix[0] + else: + # I am not in a multimix + sys_path = os.path.join(os.path.split(grid_path_abs)[0], "_grid2op_classes") if not os.path.exists(sys_path): raise RuntimeError( "Attempting to load the grid classes from the env path. Yet the directory " @@ -974,9 +998,11 @@ def make_from_dataset_path( f'Please remove "{sys_path}" and call `env.generate_classes()` where env is an ' f"environment created with `experimental_read_from_local_dir=False` (default)" ) - + import sys + sys.path.append(os.path.split(os.path.abspath(sys_path))[0]) + classes_path = sys_path # Finally instantiate env from config & overrides - # including (if activated the new grid2op behaviour) + # including (if activated the new grid2op behaviour) env = Environment( init_env_path=os.path.abspath(dataset_path), init_grid_path=grid_path_abs, @@ -1004,19 +1030,20 @@ def make_from_dataset_path( attention_budget_cls=attention_budget_class, kwargs_attention_budget=kwargs_attention_budget, logger=logger, - n_busbar=n_busbar, + n_busbar=n_busbar, # TODO n_busbar_per_sub different num per substations: read from a config file maybe (if not provided by the user) _compat_glop_version=_compat_glop_version, _read_from_local_dir=classes_path, _allow_loaded_backend=allow_loaded_backend, + _local_dir_cls=this_local_dir, + _overload_name_multimix=_overload_name_multimix, kwargs_observation=kwargs_observation, observation_bk_class=observation_backend_class, - observation_bk_kwargs=observation_backend_kwargs, - ) - + observation_bk_kwargs=observation_backend_kwargs + ) # Update the thermal limit if any if thermal_limits is not None: env.set_thermal_limit(thermal_limits) - + # Set graph layout if not None and not an empty dict if graph_layout is not None and 
graph_layout: env.attach_layout(graph_layout) diff --git a/grid2op/MakeEnv/PathUtils.py b/grid2op/MakeEnv/PathUtils.py index 99db27b5e..ece6a551f 100644 --- a/grid2op/MakeEnv/PathUtils.py +++ b/grid2op/MakeEnv/PathUtils.py @@ -18,7 +18,7 @@ KEY_DATA_PATH = "data_path" KEY_CLASS_IN_FILE = "class_in_file" - +KEY_CLASS_IN_FILE_ENV_VAR = f"grid2op_{KEY_CLASS_IN_FILE}" def str_to_bool(string: str) -> bool: """convert a "string" to a boolean, with the convention: @@ -46,11 +46,11 @@ def str_to_bool(string: str) -> bool: if KEY_CLASS_IN_FILE in dict_: USE_CLASS_IN_FILE = bool(dict_[KEY_CLASS_IN_FILE]) - if KEY_CLASS_IN_FILE in os.environ: + if KEY_CLASS_IN_FILE_ENV_VAR in os.environ: try: - USE_CLASS_IN_FILE = str_to_bool(os.environ[KEY_CLASS_IN_FILE]) + USE_CLASS_IN_FILE = str_to_bool(os.environ[KEY_CLASS_IN_FILE_ENV_VAR]) except ValueError as exc: - raise RuntimeError(f"Impossible to read the behaviour from `{KEY_CLASS_IN_FILE}` environment variable") from exc + raise RuntimeError(f"Impossible to read the behaviour from `{KEY_CLASS_IN_FILE_ENV_VAR}` environment variable") from exc def _create_path_folder(data_path): @@ -64,3 +64,10 @@ def _create_path_folder(data_path): 'and set the "data_path" to point to a path where you can store data.' 
"".format(data_path, DEFAULT_PATH_CONFIG) ) + + +def _aux_fix_backend_internal_classes(backend_cls, this_local_dir): + # fix `my_bk_act_class` and `_complete_action_class` + backend_cls._add_internal_classes(this_local_dir) + tmp = {} + backend_cls._make_cls_dict_extended(backend_cls, tmp, as_list=False) diff --git a/grid2op/Observation/baseObservation.py b/grid2op/Observation/baseObservation.py index 513b0ccfa..e1c1016ca 100644 --- a/grid2op/Observation/baseObservation.py +++ b/grid2op/Observation/baseObservation.py @@ -3389,7 +3389,7 @@ def simulate(self, action : "grid2op.Action.BaseAction", time_step:int=1) -> Tup sim_obs._update_internal_env_params(self._obs_env) return (sim_obs, *rest) # parentheses are needed for python 3.6 at least. - def copy(self) -> Self: + def copy(self, env=None) -> Self: """ INTERNAL @@ -3418,14 +3418,19 @@ def copy(self) -> Self: res = copy.deepcopy(self) self._obs_env = obs_env - res._obs_env = obs_env - self.action_helper = action_helper - res.action_helper = action_helper - self._ptr_kwargs_env = _ptr_kwargs_env - res._ptr_kwargs_env = _ptr_kwargs_env - + if env is None: + # this will make a copy but the observation will still + # be "bound" to the original env + res._obs_env = obs_env + res.action_helper = action_helper + res._ptr_kwargs_env = _ptr_kwargs_env + else: + # the action will be "bound" to the new environment + res._obs_env = env._observation_space.obs_env + res.action_helper = env._observation_space.action_helper_env + res._ptr_kwargs_env = env._observation_space._real_env_kwargs return res @property @@ -4704,6 +4709,7 @@ def _make_env_from_arays(self, prod_p=prod_p, prod_v=prod_v, maintenance=maintenance) + ch.max_iter = ch.real_data.max_iter backend = self._obs_env.backend.copy() backend._is_loaded = True diff --git a/grid2op/Observation/observationSpace.py b/grid2op/Observation/observationSpace.py index 8eeebd89a..5b4a00d95 100644 --- a/grid2op/Observation/observationSpace.py +++ 
b/grid2op/Observation/observationSpace.py @@ -72,6 +72,7 @@ def __init__( observation_bk_kwargs=None, logger=None, _with_obs_env=True, # pass + _local_dir_cls=None, ): """ INTERNAL @@ -80,22 +81,14 @@ def __init__( Env: requires :attr:`grid2op.Environment.BaseEnv.parameters` and :attr:`grid2op.Environment.BaseEnv.backend` to be valid """ - - # lazy import to prevent circular references (Env -> Observation -> Obs Space -> _ObsEnv -> Env) - from grid2op.Environment._obsEnv import _ObsEnv - if actionClass is None: from grid2op.Action import CompleteAction actionClass = CompleteAction - if logger is None: - self.logger = logging.getLogger(__name__) - self.logger.disabled = True - else: - self.logger: logging.Logger = logger.getChild("grid2op_ObsSpace") self._init_observationClass = observationClass SerializableObservationSpace.__init__( - self, gridobj, observationClass=observationClass + self, gridobj, observationClass=observationClass, _local_dir_cls=_local_dir_cls, + logger=logger, ) self.with_forecast = with_forecast self._simulate_parameters = copy.deepcopy(env.parameters) @@ -112,14 +105,9 @@ def __init__( self.reward_helper = RewardHelper(reward_func=self._reward_func, logger=self.logger) self.__can_never_use_simulate = False - # TODO here: have another backend class maybe - _with_obs_env = _with_obs_env and self._create_backend_obs(env, observation_bk_class, observation_bk_kwargs) - - self._ObsEnv_class = _ObsEnv.init_grid( - type(env.backend), force_module=_ObsEnv.__module__ - ) - self._ObsEnv_class._INIT_GRID_CLS = _ObsEnv # otherwise it's lost - setattr(sys.modules[_ObsEnv.__module__], self._ObsEnv_class.__name__, self._ObsEnv_class) + _with_obs_env = _with_obs_env and self._create_backend_obs(env, observation_bk_class, observation_bk_kwargs, _local_dir_cls) + + self._ObsEnv_class = None if _with_obs_env: self._create_obs_env(env, observationClass) self.reward_helper.initialize(self.obs_env) @@ -175,6 +163,18 @@ def set_real_env_kwargs(self, env): del 
self._real_env_kwargs["observation_bk_kwargs"] def _create_obs_env(self, env, observationClass): + if self._ObsEnv_class is None: + # lazy import to prevent circular references (Env -> Observation -> Obs Space -> _ObsEnv -> Env) + from grid2op.Environment._obsEnv import _ObsEnv + + # self._ObsEnv_class = _ObsEnv.init_grid( + # type(env.backend), force_module=_ObsEnv.__module__, force=_local_dir_cls is not None + # ) + # self._ObsEnv_class._INIT_GRID_CLS = _ObsEnv # otherwise it's lost + self._ObsEnv_class = _ObsEnv.init_grid( + type(env.backend), _local_dir_cls=env._local_dir_cls + ) + self._ObsEnv_class._INIT_GRID_CLS = _ObsEnv # otherwise it's lost other_rewards = {k: v.rewardClass for k, v in env.other_rewards.items()} self.obs_env = self._ObsEnv_class( init_env_path=None, # don't leak the path of the real grid to the observation space @@ -200,14 +200,16 @@ def _create_obs_env(self, env, observationClass): highres_sim_counter=env.highres_sim_counter, _complete_action_cls=env._complete_action_cls, _ptr_orig_obs_space=self, + _local_dir_cls=env._local_dir_cls, + _read_from_local_dir=env._read_from_local_dir, ) for k, v in self.obs_env.other_rewards.items(): v.initialize(self.obs_env) - def _aux_create_backend(self, env, observation_bk_class, observation_bk_kwargs, path_grid_for): + def _aux_create_backend(self, env, observation_bk_class, observation_bk_kwargs, path_grid_for, _local_dir_cls): if observation_bk_kwargs is None: observation_bk_kwargs = env.backend._my_kwargs - observation_bk_class_used = observation_bk_class.init_grid(type(env.backend)) + observation_bk_class_used = observation_bk_class.init_grid(type(env.backend), _local_dir_cls=_local_dir_cls) self._backend_obs = observation_bk_class_used(**observation_bk_kwargs) self._backend_obs.set_env_name(env.name) self._backend_obs.load_grid(path_grid_for) @@ -216,7 +218,7 @@ def _aux_create_backend(self, env, observation_bk_class, observation_bk_kwargs, self._backend_obs.assert_grid_correct_after_powerflow() 
self._backend_obs.set_thermal_limit(env.get_thermal_limit()) - def _create_backend_obs(self, env, observation_bk_class, observation_bk_kwargs): + def _create_backend_obs(self, env, observation_bk_class, observation_bk_kwargs, _local_dir_cls): _with_obs_env = True path_sim_bk = os.path.join(env.get_path_env(), "grid_forecast.json") if observation_bk_class is not None or observation_bk_kwargs is not None: @@ -232,12 +234,12 @@ def _create_backend_obs(self, env, observation_bk_class, observation_bk_kwargs): path_grid_for = path_sim_bk else: path_grid_for = os.path.join(env.get_path_env(), "grid.json") - self._aux_create_backend(env, observation_bk_class, observation_bk_kwargs, path_grid_for) + self._aux_create_backend(env, observation_bk_class, observation_bk_kwargs, path_grid_for, _local_dir_cls) elif os.path.exists(path_sim_bk) and os.path.isfile(path_sim_bk): # backend used for simulate will use the same class with same args as the env # backend, but with a different grid observation_bk_class = env._raw_backend_class - self._aux_create_backend(env, observation_bk_class, observation_bk_kwargs, path_sim_bk) + self._aux_create_backend(env, observation_bk_class, observation_bk_kwargs, path_sim_bk, _local_dir_cls) elif env.backend._can_be_copied: # case where I can copy the backend for the 'simulate' and I don't need to build # it (uses same class and same grid) @@ -263,10 +265,11 @@ def _deactivate_simulate(self, env): self._backend_obs.close() self._backend_obs = None self.with_forecast = False - env.deactivate_forecast() - env.backend._can_be_copied = False - self.logger.warn("Forecasts have been deactivated because " - "the backend cannot be copied.") + if env is not None: + env.deactivate_forecast() + env.backend._can_be_copied = False + self.logger.warning("Forecasts have been deactivated because " + "the backend cannot be copied.") def reactivate_forecast(self, env): if self.__can_never_use_simulate: @@ -279,8 +282,8 @@ def reactivate_forecast(self, env): if 
self._backend_obs is not None: self._backend_obs.close() self._backend_obs = None - self._create_backend_obs(env, self._observation_bk_class, self._observation_bk_kwargs) - if self.obs_env is not None : + self._create_backend_obs(env, self._observation_bk_class, self._observation_bk_kwargs, env._local_dir_cls) + if self.obs_env is not None: self.obs_env.close() self.obs_env = None self._create_obs_env(env, self._init_observationClass) @@ -329,7 +332,8 @@ def _change_parameters(self, new_param): change the parameter of the "simulate" environment """ - self.obs_env.change_parameters(new_param) + if self.obs_env is not None: + self.obs_env.change_parameters(new_param) self._simulate_parameters = new_param def change_other_rewards(self, dict_reward): @@ -453,7 +457,7 @@ def reset(self, real_env): self.obs_env.reset() self._env_param = copy.deepcopy(real_env.parameters) - def _custom_deepcopy_for_copy(self, new_obj): + def _custom_deepcopy_for_copy(self, new_obj, env=None): """implements a faster "res = copy.deepcopy(self)" to use in "self.copy" Do not use it anywhere else... 
@@ -489,13 +493,17 @@ def _custom_deepcopy_for_copy(self, new_obj): new_obj._ptr_kwargs_observation = self._ptr_kwargs_observation # real env kwargs, these is a "pointer" anyway - new_obj._real_env_kwargs = self._real_env_kwargs + if env is not None: + from grid2op.Environment import Environment + new_obj._real_env_kwargs = Environment.get_kwargs(env, False, False) + else: + new_obj._real_env_kwargs = self._real_env_kwargs new_obj._observation_bk_class = self._observation_bk_class new_obj._observation_bk_kwargs = self._observation_bk_kwargs new_obj._ObsEnv_class = self._ObsEnv_class - def copy(self, copy_backend=False): + def copy(self, copy_backend=False, env=None): """ INTERNAL @@ -516,18 +524,23 @@ def copy(self, copy_backend=False): # create an empty "me" my_cls = type(self) res = my_cls.__new__(my_cls) - self._custom_deepcopy_for_copy(res) + self._custom_deepcopy_for_copy(res, env) if not copy_backend: res._backend_obs = backend res._empty_obs = obs_.copy() res.obs_env = obs_env else: - res.obs_env = obs_env.copy() - res.obs_env._ptr_orig_obs_space = res - res._backend_obs = res.obs_env.backend - res._empty_obs = obs_.copy() - res._empty_obs._obs_env = res.obs_env + # backend needs to be copied + if obs_env is not None: + # I also need to copy the obs env + res.obs_env = obs_env.copy(env=env, new_obs_space=res) + res._backend_obs = res.obs_env.backend + res._empty_obs = obs_.copy() + res._empty_obs._obs_env = res.obs_env + else: + # no obs env: I do nothing + res.obs_env = None # assign back the results self._backend_obs = backend diff --git a/grid2op/Observation/serializableObservationSpace.py b/grid2op/Observation/serializableObservationSpace.py index 1471a51ef..7796eb74c 100644 --- a/grid2op/Observation/serializableObservationSpace.py +++ b/grid2op/Observation/serializableObservationSpace.py @@ -6,6 +6,9 @@ # SPDX-License-Identifier: MPL-2.0 # This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems. 
+import logging +import copy + from grid2op.Space import SerializableSpace from grid2op.Observation.completeObservation import CompleteObservation @@ -27,7 +30,7 @@ class SerializableObservationSpace(SerializableSpace): """ - def __init__(self, gridobj, observationClass=CompleteObservation, _init_grid=True): + def __init__(self, gridobj, observationClass=CompleteObservation, logger=None, _init_grid=True, _local_dir_cls=None): """ Parameters @@ -40,16 +43,26 @@ def __init__(self, gridobj, observationClass=CompleteObservation, _init_grid=Tru """ SerializableSpace.__init__( - self, gridobj=gridobj, subtype=observationClass, _init_grid=_init_grid + self, gridobj=gridobj, + subtype=observationClass, + _init_grid=_init_grid, + _local_dir_cls=_local_dir_cls ) self.observationClass = self.subtype self._empty_obs = self._template_obj + + if logger is None: + self.logger = logging.getLogger(__name__) + self.logger.disabled = True + else: + self.logger: logging.Logger = logger.getChild("grid2op_ObsSpace") def _custom_deepcopy_for_copy(self, new_obj): super()._custom_deepcopy_for_copy(new_obj) # SerializableObservationSpace new_obj.observationClass = self.observationClass # const new_obj._empty_obs = self._template_obj # const + new_obj.logger = copy.deepcopy(self.logger) @staticmethod def from_dict(dict_): diff --git a/grid2op/Opponent/opponentSpace.py b/grid2op/Opponent/opponentSpace.py index 60d3a9927..bca588d46 100644 --- a/grid2op/Opponent/opponentSpace.py +++ b/grid2op/Opponent/opponentSpace.py @@ -49,6 +49,7 @@ def __init__( attack_cooldown, # minimum duration between two consecutive attack budget_per_timestep=0.0, action_space=None, + _local_dir_cls=None ): if action_space is not None: diff --git a/grid2op/Runner/aux_fun.py b/grid2op/Runner/aux_fun.py index b9839f5c1..83ae34cd6 100644 --- a/grid2op/Runner/aux_fun.py +++ b/grid2op/Runner/aux_fun.py @@ -8,7 +8,7 @@ import copy import time - +import warnings import numpy as np from grid2op.Environment import Environment 
@@ -36,6 +36,7 @@ def _aux_add_data(reward, env, episode, ) return reward + def _aux_one_process_parrallel( runner, episode_this_process, @@ -46,22 +47,17 @@ def _aux_one_process_parrallel( max_iter=None, add_detailed_output=False, add_nb_highres_sim=False, - init_states=None + init_states=None, + reset_options=None, ): """this is out of the runner, otherwise it does not work on windows / macos""" - # chronics_handler = ChronicsHandler( - # chronicsClass=runner.gridStateclass, - # path=runner.path_chron, - # **runner.gridStateclass_kwargs - # ) parameters = copy.deepcopy(runner.parameters) nb_episode_this_process = len(episode_this_process) res = [(None, None, None) for _ in range(nb_episode_this_process)] for i, ep_id in enumerate(episode_this_process): # `ep_id`: grid2op id of the episode i want to play # `i`: my id of the episode played (0, 1, ... episode_this_process) - env, agent = runner._new_env(parameters=parameters - ) + env, agent = runner._new_env(parameters=parameters) try: env_seed = None if env_seeds is not None: @@ -75,7 +71,11 @@ def _aux_one_process_parrallel( init_state = init_states[i] else: init_state = None - + + if reset_options is not None: + reset_option = reset_options[i] + else: + reset_option = None tmp_ = _aux_run_one_episode( env, agent, @@ -87,7 +87,8 @@ def _aux_one_process_parrallel( agent_seed=agt_seed, detailed_output=add_detailed_output, use_compact_episode_data=runner.use_compact_episode_data, - init_state=init_state + init_state=init_state, + reset_option=reset_option ) (name_chron, cum_reward, nb_time_step, max_ts, episode_data, nb_highres_sim) = tmp_ id_chron = env.chronics_handler.get_id() @@ -114,7 +115,8 @@ def _aux_run_one_episode( max_iter=None, detailed_output=False, use_compact_episode_data=False, - init_state=None + init_state=None, + reset_option=None, ): done = False time_step = int(0) @@ -122,20 +124,34 @@ def _aux_run_one_episode( cum_reward = dt_float(0.0) # set the environment to use the proper chronic - 
env.set_id(indx) - # set the seed - if env_seed is not None: - env.seed(env_seed) - + # env.set_id(indx) + if reset_option is None: + reset_option = {} + + if "time serie id" in reset_option: + warnings.warn("You provided both `episode_id` and the key `'time serie id'` is present " + "in the provided `reset_options`. In this case, grid2op will ignore the " + "`time serie id` of the `reset_options` and keep the value in `episode_id`.") + reset_option["time serie id"] = indx + # handle max_iter if max_iter is not None: - env.chronics_handler.set_max_iter(max_iter) - + if "max step" in reset_option: + warnings.warn("You provided both `max_iter` and the key `'max step'` is present " + "in the provided `reset_options`. In this case, grid2op will ignore the " + "`max step` of the `reset_options` and keep the value in `max_iter`.") + reset_option["max step"] = max_iter + + # handle init state + if init_state is not None: + if "init state" in reset_option: + warnings.warn("You provided both `init_state` and the key `'init state'` is present " + "in the provided `reset_options`. 
In this case, grid2op will ignore the " + "`init state` of the `reset_options` and keep the value in `init_state`.") + reset_option["init state"] = init_state + # reset it - if init_state is None: - obs = env.reset() - else: - obs = env.reset(options={"init state": init_state}) + obs = env.reset(seed=env_seed, options=reset_option) # reset the number of calls to high resolution simulator env._highres_sim_counter._HighResSimCounter__nb_highres_called = 0 @@ -321,6 +337,7 @@ def _aux_run_one_episode( episode.set_episode_times(env, time_act, beg_, end_) episode.to_disk() + episode.make_serializable() name_chron = env.chronics_handler.get_name() return (name_chron, cum_reward, int(time_step), diff --git a/grid2op/Runner/runner.py b/grid2op/Runner/runner.py index 647630ae5..189dbefa6 100644 --- a/grid2op/Runner/runner.py +++ b/grid2op/Runner/runner.py @@ -21,7 +21,7 @@ from grid2op.Reward import FlatReward, BaseReward from grid2op.Rules import AlwaysLegal from grid2op.Environment import Environment -from grid2op.Chronics import ChronicsHandler, GridStateFromFile, GridValue +from grid2op.Chronics import ChronicsHandler, GridStateFromFile, GridValue, MultifolderWithCache from grid2op.Backend import Backend, PandaPowerBackend from grid2op.Parameters import Parameters from grid2op.Agent import DoNothingAgent, BaseAgent @@ -29,18 +29,20 @@ from grid2op.dtypes import dt_float from grid2op.Opponent import BaseOpponent, NeverAttackBudget from grid2op.operator_attention import LinearAttentionBudget +from grid2op.Space import DEFAULT_N_BUSBAR_PER_SUB +from grid2op.Episode import EpisodeData +# on windows if i start using sequential, i need to continue using sequential +# if i start using parallel i need to continue using parallel +# so i force the usage of the "starmap" stuff even if there is one process on windows +from grid2op._glop_platform_info import _IS_WINDOWS, _IS_LINUX, _IS_MACOS + from grid2op.Runner.aux_fun import ( _aux_run_one_episode, _aux_make_progress_bar, 
_aux_one_process_parrallel, ) from grid2op.Runner.basic_logger import DoNothingLog, ConsoleLog -from grid2op.Episode import EpisodeData -# on windows if i start using sequential, i need to continue using sequential -# if i start using parallel i need to continue using parallel -# so i force the usage of the "starmap" stuff even if there is one process on windows -from grid2op._glop_platform_info import _IS_WINDOWS, _IS_LINUX, _IS_MACOS runner_returned_type = Union[Tuple[str, str, float, int, int], Tuple[str, str, float, int, int, EpisodeData], @@ -55,6 +57,7 @@ # TODO use gym logger if specified by the user. # TODO: if chronics are "loop through" multiple times, only last results are saved. :-/ +KEY_TIME_SERIE_ID = "time serie id" class Runner(object): """ @@ -71,8 +74,12 @@ class Runner(object): env = grid2op.make("l2rpn_case14_sandbox") + # use of a Runner + runner = Runner(**env.get_params_for_runner(), agentClass=RandomAgent) + res = runner.run(nb_episode=nn_episode) + ############### - # the gym loops + # the "equivalent" gym loops nb_episode = 5 for i in range(nb_episode): obs = env.reset() @@ -81,11 +88,10 @@ class Runner(object): while not done: act = agent.act(obs, reward, done) obs, reward, done, info = env.step(act) - + # but this loop does not handle the seeding, does not save the results + # does not store anything related to the run you made etc. + # the Runner can do that with simple calls (see bellow) ############### - # equivalent with use of a Runner - runner = Runner(**env.get_params_for_runner(), agentClass=RandomAgent) - res = runner.run(nb_episode=nn_episode) This specific class as for main purpose to evaluate the performance of a trained @@ -98,6 +104,109 @@ class Runner(object): encourage you to use the :func:`grid2op.Environment.Environment.get_params_for_runner` for creating a runner. + You can customize the agent instance you want with the following code: + + .. 
code-block:: python + + import grid2op + from grid2op.Agent import RandomAgent # for example... + from grid2op.Runner import Runner + + env = grid2op.make("l2rpn_case14_sandbox") + + agent_instance = RandomAgent(env.action_space) + runner = Runner(**env.get_params_for_runner(), agentClass=None, agentInstance=agent_instance) + res = runner.run(nb_episode=nn_episode) + + You can customize the seeds, the scenarios ID you want, the number of initial steps to skip, the + maximum duration of an episode etc. For more information, please refer to the :func:`Runner.run` + + You can also easily retrieve the :class:`grid2op.Episode.EpisodeData` representing your runs with: + + .. code-block:: python + + import grid2op + from grid2op.Agent import RandomAgent # for example... + from grid2op.Runner import Runner + + env = grid2op.make("l2rpn_case14_sandbox") + + agent_instance = RandomAgent(env.action_space) + runner = Runner(**env.get_params_for_runner(), agentClass=None, agentInstance=agent_instance) + res = runner.run(nb_episode=2, + add_detailed_output=True) + for *_, ep_data in res: + # ep_data are the EpisodeData you can use to do whatever + ... + + You can save the results in a standardized format with: + + .. code-block:: python + + import grid2op + from grid2op.Agent import RandomAgent # for example... + from grid2op.Runner import Runner + + env = grid2op.make("l2rpn_case14_sandbox") + + agent_instance = RandomAgent(env.action_space) + runner = Runner(**env.get_params_for_runner(), agentClass=None, agentInstance=agent_instance) + res = runner.run(nb_episode=2, + save_path="A/PATH/SOMEWHERE") # eg "/home/user/you/grid2op_results/this_run" + + You can also easily (on some platform) easily make the evaluation faster by using the "multi processing" python + package with: + + .. code-block:: python + + import grid2op + from grid2op.Agent import RandomAgent # for example... 
+ from grid2op.Runner import Runner + + env = grid2op.make("l2rpn_case14_sandbox") + + agent_instance = RandomAgent(env.action_space) + runner = Runner(**env.get_params_for_runner(), agentClass=None, agentInstance=agent_instance) + res = runner.run(nb_episode=2, + nb_process=2) + + And, as of grid2op 1.10.3 you can know customize the multi processing context you want + to use to evaluate your agent, like this: + + .. code-block:: python + + import multiprocessing as mp + import grid2op + from grid2op.Agent import RandomAgent # for example... + from grid2op.Runner import Runner + + env = grid2op.make("l2rpn_case14_sandbox") + + agent_instance = RandomAgent(env.action_space) + + ctx = mp.get_context('spawn') # or "fork" or "forkserver" + runner = Runner(**env.get_params_for_runner(), + agentClass=None, + agentInstance=agent_instance, + mp_context=ctx) + res = runner.run(nb_episode=2, + nb_process=2) + + If you set this, the multiprocessing `Pool` used to evaluate your agents will be made with: + + .. code-block:: python + + with mp_context.Pool(nb_process) as p: + .... + + Otherwise the default "Pool" is used: + + .. code-block:: python + + with Pool(nb_process) as p: + .... + + Attributes ---------- envClass: ``type`` @@ -244,7 +353,7 @@ def __init__( init_env_path: str, init_grid_path: str, path_chron, # path where chronics of injections are stored - n_busbar=2, + n_busbar=DEFAULT_N_BUSBAR_PER_SUB, name_env="unknown", parameters_path=None, names_chronics_to_backend=None, @@ -286,10 +395,12 @@ def __init__( kwargs_observation=None, observation_bk_class=None, observation_bk_kwargs=None, - + mp_context=None, # experimental: whether to read from local dir or generate the classes on the fly: - _read_from_local_dir=False, + _read_from_local_dir=None, _is_test=False, # TODO not implemented !! + _local_dir_cls=None, + _overload_name_multimix=None ): """ Initialize the Runner. 
@@ -357,6 +468,7 @@ def __init__( self._n_busbar = n_busbar self.with_forecast = with_forecast self.name_env = name_env + self._overload_name_multimix = _overload_name_multimix if not isinstance(envClass, type): raise Grid2OpException( 'Parameter "envClass" used to build the Runner should be a type (a class) and not an object ' @@ -372,7 +484,6 @@ def __init__( self.other_env_kwargs = other_env_kwargs else: self.other_env_kwargs = {} - if not isinstance(actionClass, type): raise Grid2OpException( 'Parameter "actionClass" used to build the Runner should be a type (a class) and not an object ' @@ -428,7 +539,11 @@ def __init__( 'grid2op.GridValue. Please modify "gridStateclass" parameter.' ) self.gridStateclass = gridStateclass - + if issubclass(gridStateclass, MultifolderWithCache): + warnings.warn("We do not recommend to use the `MultifolderWithCache` during the " + "evaluation of your agents. It is possible but you might end up with " + "side effects (see issue 616 for example). It is safer to use the " + "`Multifolder` class as a drop-in replacement.") self.envClass._check_rules_correct(legalActClass) self.legalActClass = legalActClass @@ -450,6 +565,14 @@ def __init__( else: self._backend_kwargs = {} + # we keep a reference to the local directory (tmpfile) where + # the classes definition are stored while the runner lives + self._local_dir_cls = _local_dir_cls + + # multi processing context that controls the way the computations are + # distributed when using multiple processes + self._mp_context = mp_context + self.__can_copy_agent = True if agentClass is not None: if agentInstance is not None: @@ -539,11 +662,6 @@ def __init__( self.max_iter = max_iter if max_iter > 0: self.gridStateclass_kwargs["max_iter"] = max_iter - # self.chronics_handler = ChronicsHandler( - # chronicsClass=self.gridStateclass, - # path=self.path_chron, - # **self.gridStateclass_kwargs - # ) self.verbose = verbose self.thermal_limit_a = thermal_limit_a @@ -636,12 +754,6 @@ def 
_make_new_backend(self): return res def _new_env(self, parameters) -> Tuple[BaseEnv, BaseAgent]: - # the same chronics_handler is used for all the environments. - # make sure to "reset" it properly - # (this is handled elsewhere in case of "multi chronics") - # ch_used = copy.deepcopy(chronics_handler) - # if not ch_used.chronicsClass.MULTI_CHRONICS: - # ch_used.next_chronics() chronics_handler = ChronicsHandler( chronicsClass=self.gridStateclass, path=self.path_chron, @@ -651,8 +763,8 @@ def _new_env(self, parameters) -> Tuple[BaseEnv, BaseAgent]: with warnings.catch_warnings(): warnings.filterwarnings("ignore") res = self.envClass.init_obj_from_kwargs( - n_busbar=self._n_busbar, other_env_kwargs=self.other_env_kwargs, + n_busbar=self._n_busbar, init_env_path=self.init_env_path, init_grid_path=self.init_grid_path, chronics_handler=chronics_handler, @@ -685,6 +797,9 @@ def _new_env(self, parameters) -> Tuple[BaseEnv, BaseAgent]: observation_bk_kwargs=self._observation_bk_kwargs, _raw_backend_class=self.backendClass, _read_from_local_dir=self._read_from_local_dir, + # _local_dir_cls: we don't set it, in parrallel mode it makes no sense ! + _local_dir_cls=None, + _overload_name_multimix=self._overload_name_multimix ) if self.thermal_limit_a is not None: @@ -721,7 +836,7 @@ def reset(self): .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\ Used to reset an environment. This method is called at the beginning of each new episode. - If the environment is not initialized, then it initializes it with :func:`Runner.make_env`. + If the environment is not initialized, then it initializes it with :func:`Runner.init_env`. 
""" pass @@ -736,7 +851,8 @@ def run_one_episode( episode_id=None, detailed_output=False, add_nb_highres_sim=False, - init_state=None + init_state=None, + reset_options=None, ) -> runner_returned_type: """ INTERNAL @@ -773,12 +889,23 @@ def run_one_episode( """ self.reset() - with self.init_env() as env: + with self.init_env() as env: + # small piece of code to detect the + # episode id + if episode_id is None: + # user did not provide any episode id, I check in the reset_options + if reset_options is not None: + if KEY_TIME_SERIE_ID in reset_options: + indx = int(reset_options[KEY_TIME_SERIE_ID]) + del reset_options[KEY_TIME_SERIE_ID] + else: + # user specified an episode id, I use it. + indx = episode_id res = _aux_run_one_episode( env, self.agent, self.logger, - indx if episode_id is None else episode_id, + indx, path_save, pbar=pbar, env_seed=env_seed, @@ -787,9 +914,10 @@ def run_one_episode( detailed_output=detailed_output, use_compact_episode_data = self.use_compact_episode_data, init_state=init_state, + reset_option=reset_options, ) if max_iter is not None: - env.chronics_handler.set_max_iter(-1) + env.chronics_handler._set_max_iter(-1) id_chron = env.chronics_handler.get_id() # `res` here necessarily contains detailed_output and nb_highres_call @@ -813,7 +941,8 @@ def _run_sequential( episode_id=None, add_detailed_output=False, add_nb_highres_sim=False, - init_states=None + init_states=None, + reset_options=None, ) -> List[runner_returned_type]: """ INTERNAL @@ -886,9 +1015,22 @@ def _run_sequential( init_state = None if init_states is not None: init_state = init_states[i] - ep_id = i # if no "episode_id" is provided i used the i th one + reset_opt = None + if reset_options is not None: + # we copy it because we might remove the "time serie id" + # from it + reset_opt = reset_options[i].copy() + # if no "episode_id" is provided i used the i th one + ep_id = i if episode_id is not None: + # if episode_id is provided, I use this one ep_id = episode_id[i] # 
otherwise i use the provided one + else: + # if it's not provided, I check if one is used in the `reset_options` + if reset_opt is not None: + if KEY_TIME_SERIE_ID in reset_opt: + ep_id = int(reset_opt[KEY_TIME_SERIE_ID]) + del reset_opt[KEY_TIME_SERIE_ID] ( id_chron, name_chron, @@ -900,6 +1042,7 @@ def _run_sequential( ) = self.run_one_episode( path_save=path_save, indx=ep_id, + episode_id=ep_id, pbar=next_pbar[0], env_seed=env_seed, agent_seed=agt_seed, @@ -907,6 +1050,7 @@ def _run_sequential( detailed_output=True, add_nb_highres_sim=True, init_state=init_state, + reset_options=reset_opt ) res[i] = (id_chron, name_chron, @@ -932,7 +1076,8 @@ def _run_parrallel( episode_id=None, add_detailed_output=False, add_nb_highres_sim=False, - init_states=None + init_states=None, + reset_options=None, ) -> List[runner_returned_type]: """ INTERNAL @@ -1003,7 +1148,7 @@ def _run_parrallel( # if i start using parallel i need to continue using parallel # so i force the usage of the sequential mode self.logger.warn( - "Runner.run_parrallel: number of process set to 1. Failing back into sequential mod." + "Runner.run_parrallel: number of process set to 1. Failing back into sequential mode." 
) return self._run_sequential( nb_episode, @@ -1015,16 +1160,33 @@ def _run_parrallel( add_detailed_output=add_detailed_output, add_nb_highres_sim=add_nb_highres_sim, init_states=init_states, + reset_options=reset_options ) else: + if self._local_dir_cls is not None: + self._local_dir_cls._RUNNER_DO_NOT_ERASE = True self._clean_up() nb_process = int(nb_process) process_ids = [[] for i in range(nb_process)] for i in range(nb_episode): if episode_id is None: - process_ids[i % nb_process].append(i) + # user does not provide episode_id + if reset_options is not None: + # we copy them, because we might delete some things from them + reset_options = [el.copy() for el in reset_options] + + # we check if the reset_options contains the "time serie id" + if KEY_TIME_SERIE_ID in reset_options[i]: + this_ep_id = int(reset_options[i][KEY_TIME_SERIE_ID]) + del reset_options[i][KEY_TIME_SERIE_ID] + else: + this_ep_id = i + else: + this_ep_id = i + process_ids[i % nb_process].append(this_ep_id) else: + # user provided episode_id, we use this one process_ids[i % nb_process].append(episode_id[i]) if env_seeds is None: @@ -1046,17 +1208,24 @@ def _run_parrallel( if init_states is None: init_states_res = [None for _ in range(nb_process)] else: - # split the seeds according to the process + # split the init states according to the process init_states_res = [[] for _ in range(nb_process)] for i in range(nb_episode): init_states_res[i % nb_process].append(init_states[i]) + if reset_options is None: + reset_options_res = [None for _ in range(nb_process)] + else: + # split the reset options according to the process + reset_options_res = [[] for _ in range(nb_process)] + for i in range(nb_episode): + reset_options_res[i % nb_process].append(reset_options[i]) + res = [] if _IS_LINUX: lists = [(self,) for _ in enumerate(process_ids)] else: lists = [(Runner(**self._get_params()),) for _ in enumerate(process_ids)] - for i, pn in enumerate(process_ids): lists[i] = (*lists[i], pn, @@ -1067,15 
+1236,19 @@ def _run_parrallel( max_iter, add_detailed_output, add_nb_highres_sim, - init_states_res[i]) - - if get_start_method() == 'spawn': - # https://github.com/rte-france/Grid2Op/issues/600 - with get_context("spawn").Pool(nb_process) as p: - tmp = p.starmap(_aux_one_process_parrallel, lists) - else: - with Pool(nb_process) as p: + init_states_res[i], + reset_options_res[i]) + if self._mp_context is not None: + with self._mp_context.Pool(nb_process) as p: tmp = p.starmap(_aux_one_process_parrallel, lists) + else: + if get_start_method() == 'spawn': + # https://github.com/rte-france/Grid2Op/issues/600 + with get_context("spawn").Pool(nb_process) as p: + tmp = p.starmap(_aux_one_process_parrallel, lists) + else: + with Pool(nb_process) as p: + tmp = p.starmap(_aux_one_process_parrallel, lists) for el in tmp: res += el return res @@ -1121,8 +1294,15 @@ def _get_params(self): "logger": self.logger, "use_compact_episode_data": self.use_compact_episode_data, "kwargs_observation": self._kwargs_observation, + "observation_bk_class": self._observation_bk_class, + "observation_bk_kwargs": self._observation_bk_kwargs, "_read_from_local_dir": self._read_from_local_dir, "_is_test": self._is_test, + "_overload_name_multimix": self._overload_name_multimix, + "other_env_kwargs": self.other_env_kwargs, + "n_busbar": self._n_busbar, + "mp_context": None, # this is used in multi processing context, avoid to multi process a multi process stuff + "_local_dir_cls": self._local_dir_cls, } return res @@ -1140,6 +1320,7 @@ def _clean_up(self): def run( self, nb_episode, + *, # force kwargs nb_process=1, path_save=None, max_iter=None, @@ -1150,6 +1331,7 @@ def run( add_detailed_output=False, add_nb_highres_sim=False, init_states=None, + reset_options=None, ) -> List[runner_returned_type]: """ Main method of the :class:`Runner` class. 
It will either call :func:`Runner._run_sequential` if "nb_process" is @@ -1170,7 +1352,11 @@ def run( max_iter: ``int`` Maximum number of iteration you want the runner to perform. - + + .. warning:: + (only for grid2op >= 1.10.3) If set in this parameters, it will + erase all values that may be present in the `reset_options` kwargs (key `"max step"`) + pbar: ``bool`` or ``type`` or ``object`` How to display the progress bar, understood as follow: @@ -1196,6 +1382,15 @@ def run( For each of the nb_episdeo you want to compute, it specifies the id of the chronix that will be used. By default ``None``, no seeds are set. If provided, its size should match ``nb_episode``. + + .. warning:: + (only for grid2op >= 1.10.3) If set in this parameters, it will + erase all values that may be present in the `reset_options` kwargs (key `"time serie id"`). + + .. danger:: + As of now, it's not properly handled to compute twice the same `episode_id` more than once using the runner + (more specifically, the computation will happen but file might not be saved correctly on the + hard drive: attempt to save all the results in the same location. We do not advise to do it) add_detailed_output: ``bool`` A flag to add an :class:`EpisodeData` object to the results, containing a lot of information about the run @@ -1215,6 +1410,43 @@ def run( If you provide a dictionary or a grid2op action, then this element will be used for all scenarios you want to run. + .. warning:: + (only for grid2op >= 1.10.3) If set in this parameters, it will + erase all values that may be present in the `reset_options` kwargs (key `"init state"`). + + reset_options: + (added in grid2op 1.10.3) Possibility to customize the call to `env.reset` made internally by + the Runner. More specifically, it will pass a custom `options` when the runner calls + `env.reset(..., options=XXX)`. + + It should either be: + + - a dictionary that can be used directly by :func:`grid2op.Environment.Environment.reset`. 
+ In this case the same dictionary will be used for all the episodes computed by the runner. + - a list / tuple of one of the above with the same size as the number of episode you want to + compute which allow a full customization for each episode. + + .. warning:: + If the kwargs `max_iter` is present when calling `runner.run` function, then the key `max step` + will be ignored in all the `reset_options` dictionary. + + .. warning:: + If the kwargs `episode_id` is present when calling `runner.run` function, then the key `time serie id` + will be ignored in all the `reset_options` dictionary. + + .. warning:: + If the kwargs `init_states` is present when calling `runner.run` function, then the key `init state` + will be ignored in all the `reset_options` dictionary. + + .. danger:: + If you provide the key "time serie id" in one of the `reset_options` dictionary, we recommend + you do it for all `reset options` otherwise you might not end up computing the correct episodes. + + .. danger:: + As of now, it's not properly handled to compute twice the same `time serie` more than once using the runner + (more specifically, the computation will happen but file might not be saved correctly on the + hard drive: attempt to save all the results in the same location. We do not advise to do it) + Returns ------- res: ``list`` @@ -1235,7 +1467,7 @@ def run( You can use the runner this way: - .. code-block: python + .. code-block:: python import grid2op from gri2op.Runner import Runner @@ -1247,7 +1479,7 @@ def run( If you would rather to provide an agent instance (and not a class) you can do it this way: - .. code-block: python + .. code-block:: python import grid2op from gri2op.Runner import Runner @@ -1263,7 +1495,7 @@ def run( by passing `env_seeds` and `agent_seeds` parameters (on the example bellow, the agent will be seeded with 42 and the environment with 0. - .. code-block: python + .. 
code-block:: python import grid2op from gri2op.Runner import Runner @@ -1277,7 +1509,7 @@ def run( Since grid2op 1.10.2 you can also set the initial state of the grid when calling the runner. You can do that with the kwargs `init_states`, for example like this: - .. code-block: python + .. code-block:: python import grid2op from gri2op.Runner import Runner @@ -1307,7 +1539,85 @@ def run( that you can control what exactly is done (set the `"method"`) more information about this on the doc of the :func:`grid2op.Environment.Environment.reset` function. + + Since grid2op 1.10.3 you can also customize the way the runner will "reset" the + environment with the kwargs `reset_options`. + + Concretely, if you specify `runner.run(..., reset_options=XXX)` then the environment + will be reset with a call to `env.reset(options=reset_options)`. + + As for the init states kwargs, reset_options can be either a dictionnary, in this + case the same dict will be used for running all the episode or a list / tuple + of dictionnaries with the same size as the `nb_episode` kwargs. + + .. code-block:: python + + import grid2op + from gri2op.Runner import Runner + from grid2op.Agent import RandomAgent + + env = grid2op.make("l2rpn_case14_sandbox") + my_agent = RandomAgent(env.action_space) + runner = Runner(**env.get_params_for_runner(), agentClass=None, agentInstance=my_agent) + res = runner.run(nb_episode=2, + agent_seeds=[42, 43], + env_seeds=[0, 1], + reset_options={"init state": {"set_line_status": [(0, -1)]}} + ) + # same initial state will be used for the two epusode + + res2 = runner.run(nb_episode=2, + agent_seeds=[42, 43], + env_seeds=[0, 1], + reset_options=[{"init state": {"set_line_status": [(0, -1)]}}, + {"init state": {"set_line_status": [(1, -1)]}}] + ) + # two different initial states will be used: the first one for the + # first episode and the second one for the second + + .. note:: + In case of conflicting inputs, for example when you specify: + + .. 
code-block:: python + + runner.run(..., + init_states=XXX, + reset_options={"init state"=YYY} + ) + + or + .. code-block:: python + + runner.run(..., + max_iter=XXX, + reset_options={"max step"=YYY} + ) + + or + + .. code-block:: python + + runner.run(..., + episode_id=XXX, + reset_options={"time serie id"=YYY} + ) + + Then: 1) a warning is issued to inform you that you might have + done something wrong and 2) the value in `XXX` above (*ie* the + value provided in the `runner.run` kwargs) is always used + instead of the value `YYY` (*ie* the value present in the + reset_options). + + In other words, the arguments of the `runner.run` have the + priority over the arguments passed to the `reset_options`. + + .. danger:: + If you provide the key "time serie id" in one of the `reset_options` + dictionary, we recommend + you do it for all `reset_options` otherwise you might not end up + computing the correct episodes. + """ if nb_episode < 0: raise RuntimeError("Impossible to run a negative number of scenarios.") @@ -1354,8 +1664,43 @@ def run( f"You provided {type(el)} at position {i}.") else: raise RuntimeError("When using `init_state` in the runner, you should make sure to use " - "either use dictionnary, grid2op actions or list of actions.") - + "either use dictionnary, grid2op actions or list / tuple of actions.") + + if reset_options is not None: + if isinstance(reset_options, dict): + for k in reset_options: + if not k in self.envClass.KEYS_RESET_OPTIONS: + raise RuntimeError("Wehn specifying `reset options` all keys of the dictionary should " + "be compatible with the available reset options of your environment " + f"class. 
You provided the key \"{k}\" for the provided dictionary but" + f"possible keys are limited to {self.envClass.KEYS_RESET_OPTIONS}.") + # user provided one initial state, I copy it to all + # evaluation + reset_options = [reset_options.copy() for _ in range(nb_episode)] + elif isinstance(reset_options, (list, tuple, np.ndarray)): + # user provided a list ofreset_options, it should match the + # number of scenarios + if len(reset_options) != nb_episode: + raise RuntimeError( + 'You want to compute "{}" run(s) but provide only "{}" different reset options.' + "".format(nb_episode, len(reset_options)) + ) + for i, el in enumerate(reset_options): + if not isinstance(el, dict): + raise RuntimeError("When specifying `reset_options` kwargs with a list (or a tuple) " + "it should be a list (or a tuple) of dictionary or BaseAction. " + f"You provided {type(el)} at position {i}.") + for i, el in enumerate(reset_options): + for k in el: + if not k in self.envClass.KEYS_RESET_OPTIONS: + raise RuntimeError("Wehn specifying `reset options` all keys of the dictionary should " + "be compatible with the available reset options of your environment " + f"class. 
You provided the key \"{k}\" for the {i}th dictionary but" + f"possible keys are limited to {self.envClass.KEYS_RESET_OPTIONS}.") + else: + raise RuntimeError("When using `reset_options` in the runner, you should make sure to use " + "either use dictionnary, grid2op actions or list / tuple of actions.") + if max_iter is not None: max_iter = int(max_iter) @@ -1378,7 +1723,8 @@ def run( episode_id=episode_id, add_detailed_output=add_detailed_output, add_nb_highres_sim=add_nb_highres_sim, - init_states=init_states + init_states=init_states, + reset_options=reset_options ) else: if add_detailed_output and (_IS_WINDOWS or _IS_MACOS): @@ -1397,7 +1743,8 @@ def run( episode_id=episode_id, add_detailed_output=add_detailed_output, add_nb_highres_sim=add_nb_highres_sim, - init_states=init_states + init_states=init_states, + reset_options=reset_options ) else: self.logger.info("Parallel runner used.") @@ -1411,7 +1758,8 @@ def run( episode_id=episode_id, add_detailed_output=add_detailed_output, add_nb_highres_sim=add_nb_highres_sim, - init_states=init_states + init_states=init_states, + reset_options=reset_options ) finally: self._clean_up() diff --git a/grid2op/Space/GridObjects.py b/grid2op/Space/GridObjects.py index 361f91253..5b8d285b8 100644 --- a/grid2op/Space/GridObjects.py +++ b/grid2op/Space/GridObjects.py @@ -19,13 +19,15 @@ """ import warnings import copy +import os import numpy as np +import sys from packaging import version from typing import Dict, Union, Literal, Any, List, Optional, ClassVar, Tuple import grid2op from grid2op.dtypes import dt_int, dt_float, dt_bool -from grid2op.typing_variables import CLS_AS_DICT_TYPING +from grid2op.typing_variables import CLS_AS_DICT_TYPING, N_BUSBAR_PER_SUB_TYPING from grid2op.Exceptions import * from grid2op.Space.space_utils import extract_from_dict, save_to_dict @@ -635,7 +637,8 @@ def __init__(self): pass @classmethod - def set_n_busbar_per_sub(cls, n_busbar_per_sub: int) -> None: + def set_n_busbar_per_sub(cls, 
n_busbar_per_sub: N_BUSBAR_PER_SUB_TYPING) -> None: + # TODO n_busbar_per_sub different num per substations cls.n_busbar_per_sub = n_busbar_per_sub @classmethod @@ -1364,7 +1367,7 @@ def _compute_pos_big_topo(self): self._init_class_attr(_topo_vect_only=True) cls = type(self) cls._compute_pos_big_topo_cls() - + @classmethod def _compute_pos_big_topo_cls(cls): """ @@ -1395,8 +1398,9 @@ def _compute_pos_big_topo_cls(cls): ): # no storage on the grid, so i deactivate them cls.set_no_storage() - cls._compute_sub_elements() - cls._compute_sub_pos() + cls._compute_sub_elements() # fill the dim_topo and sub_info attributes + cls._compute_sub_pos() # fill the _to_sub_pos attributes + cls._fill_names() # fill the name_xxx attributes cls.load_pos_topo_vect = cls._aux_pos_big_topo( cls.load_to_subid, cls.load_to_sub_pos @@ -1609,19 +1613,22 @@ def _fill_names(cls): cls._reset_cls_dict() if cls.shunts_data_available and cls.name_shunt is None: - cls.name_shunt = [ - "shunt_{}_{}".format(bus_id, sh_id) - for sh_id, bus_id in enumerate(cls.shunt_to_subid) - ] - cls.name_shunt = np.array(cls.name_shunt) - warnings.warn( - "name_shunt is None so default storage unit names have been assigned to your grid. " - "(FYI: storage names are used to make the correspondence between the chronics and " - "the backend)" - "This might result in impossibility to load data." - '\n\tIf "env.make" properly worked, you can safely ignore this warning.' - ) - cls._reset_cls_dict() + if cls.shunt_to_subid is not None: + # used for legacy lightsim2grid + # shunt names were defined after... + cls.name_shunt = [ + "shunt_{}_{}".format(bus_id, sh_id) + for sh_id, bus_id in enumerate(cls.shunt_to_subid) + ] + cls.name_shunt = np.array(cls.name_shunt) + warnings.warn( + "name_shunt is None so default shunt names have been assigned to your grid. " + "(FYI: shunt names are used to make the correspondence between the chronics and " + "the backend)" + "This might result in impossibility to load data." 
+ '\n\tIf "env.make" properly worked, you can safely ignore this warning.' + ) + cls._reset_cls_dict() @classmethod def _check_names(cls): @@ -2019,10 +2026,21 @@ def assert_grid_correct_cls(cls): # TODO refactor this method with the `_check***` methods. # TODO refactor the `_check***` to use the same "base functions" that would be coded only once. - if cls.n_busbar_per_sub != int(cls.n_busbar_per_sub): - raise EnvError(f"`n_busbar_per_sub` should be convertible to an integer, found {cls.n_busbar_per_sub}") - cls.n_busbar_per_sub = int(cls.n_busbar_per_sub) - if cls.n_busbar_per_sub < 1: + # TODO n_busbar_per_sub different num per substations + if isinstance(cls.n_busbar_per_sub, (int, dt_int, np.int32, np.int64)): + cls.n_busbar_per_sub = dt_int(cls.n_busbar_per_sub) + # np.full(cls.n_sub, + # fill_value=cls.n_busbar_per_sub, + # dtype=dt_int) + else: + # cls.n_busbar_per_sub = np.array(cls.n_busbar_per_sub) + # cls.n_busbar_per_sub = cls.n_busbar_per_sub.astype(dt_int) + raise EnvError("Grid2op cannot handle a different number of busbar per substations at the moment.") + + # if cls.n_busbar_per_sub != int(cls.n_busbar_per_sub): + # raise EnvError(f"`n_busbar_per_sub` should be convertible to an integer, found {cls.n_busbar_per_sub}") + # cls.n_busbar_per_sub = int(cls.n_busbar_per_sub) + if (cls.n_busbar_per_sub < 1).any(): raise EnvError(f"`n_busbar_per_sub` should be >= 1 found {cls.n_busbar_per_sub}") if cls.n_gen <= 0: @@ -2334,14 +2352,14 @@ def _check_validity_alarm_data(cls): # the "alarm" feature is supported assert isinstance( - cls.alarms_area_names, list - ), "cls.alarms_area_names should be a list" + cls.alarms_area_names, (list, tuple) + ), "cls.alarms_area_names should be a list or a tuple" assert isinstance( cls.alarms_lines_area, dict ), "cls.alarms_lines_area should be a dict" assert isinstance( - cls.alarms_area_lines, list - ), "cls.alarms_area_lines should be a dict" + cls.alarms_area_lines, (list, tuple) + ), "cls.alarms_area_lines should be 
a list or a tuple" assert ( len(cls.alarms_area_names) == cls.dim_alarms ), "len(cls.alarms_area_names) != cls.dim_alarms" @@ -2860,7 +2878,48 @@ def set_env_name(cls, name): cls.env_name = name @classmethod - def init_grid(cls, gridobj, force=False, extra_name=None, force_module=None): + def _aux_init_grid_from_cls(cls, gridobj, name_res): + import importlib + # NB: these imports needs to be consistent with what is done in + # base_env.generate_classes() + super_module_nm, module_nm = os.path.split(gridobj._PATH_GRID_CLASSES) + if module_nm == "_grid2op_classes": + # legacy "experimental_read_from_local_dir" + # issue was the module "_grid2op_classes" had the same name + # regardless of the environment, so grid2op was "confused" + env_path, env_nm = os.path.split(super_module_nm) + if env_path not in sys.path: + sys.path.append(env_path) + super_supermodule = importlib.import_module(env_nm) + module_nm = f"{env_nm}.{module_nm}" + super_module_nm = super_supermodule + + if f"{module_nm}.{name_res}_file" in sys.modules: + cls_res = getattr(sys.modules[f"{module_nm}.{name_res}_file"], name_res) + # do not forget to create the cls_dict once and for all + if cls_res._CLS_DICT is None: + tmp = {} + cls_res._make_cls_dict_extended(cls_res, tmp, as_list=False) + return cls_res + + super_module = importlib.import_module(module_nm, super_module_nm) # env/path/_grid2op_classes/ + module_all_classes = importlib.import_module(f"{module_nm}") # module specific to the tmpdir created + try: + module = importlib.import_module(f".{name_res}_file", package=module_nm) # module containing the definition of the class + except ModuleNotFoundError: + # in case we need to build the cache again if the module is not found the first time + importlib.invalidate_caches() + importlib.reload(super_module) + module = importlib.import_module(f".{name_res}_file", package=module_nm) + cls_res = getattr(module, name_res) + # do not forget to create the cls_dict once and for all + if cls_res._CLS_DICT 
is None: + tmp = {} + cls_res._make_cls_dict_extended(cls_res, tmp, as_list=False) + return cls_res + + @classmethod + def init_grid(cls, gridobj, force=False, extra_name=None, force_module=None, _local_dir_cls=None): """ INTERNAL @@ -2900,20 +2959,45 @@ def init_grid(cls, gridobj, force=False, extra_name=None, force_module=None): # with shunt and without shunt, then # there might be issues name_res += "_noshunt" - + + # TODO n_busbar_per_sub different num per substations: if it's a vector, use some kind of hash of it + # for the name of the class ! if gridobj.n_busbar_per_sub != DEFAULT_N_BUSBAR_PER_SUB: # to be able to load same environment with # different `n_busbar_per_sub` name_res += f"_{gridobj.n_busbar_per_sub}" + + if _local_dir_cls is not None and gridobj._PATH_GRID_CLASSES is not None: + # new in grid2op 1.10.3: + # if I end up here it's because (done in base_env.generate_classes()): + # 1) the first initial env has already been created + # 2) I need to init the class from the files (and not from whetever else) + # So i do it. And if that is the case, the files are created on the hard drive + # AND the module is added to the path + + # check that it matches (security / consistency check) + if not os.path.samefile(_local_dir_cls.name , gridobj._PATH_GRID_CLASSES): + # in windows the string comparison fails because of things like "/", "\" or "\\" + # this is why we use "samefile" + raise EnvError(f"Unable to create the class: mismatch between " + f"_local_dir_cls ({_local_dir_cls.name}) and " + f" _PATH_GRID_CLASSES ({gridobj._PATH_GRID_CLASSES})") + return cls._aux_init_grid_from_cls(gridobj, name_res) + elif gridobj._PATH_GRID_CLASSES is not None: + # If I end up it's because the environment is created with already initialized + # classes. 
+ return cls._aux_init_grid_from_cls(gridobj, name_res) + # legacy behaviour: build the class "on the fly" + # of new (>= 1.10.3 for the intial creation of the environment) if name_res in globals(): - if not force: + if not force and _local_dir_cls is None: # no need to recreate the class, it already exists return globals()[name_res] else: # i recreate the variable del globals()[name_res] - + cls_attr_as_dict = {} GridObjects._make_cls_dict_extended(gridobj, cls_attr_as_dict, as_list=False) res_cls = type(name_res, (cls,), cls_attr_as_dict) @@ -4089,10 +4173,16 @@ class res(GridObjects): cls.glop_version = cls.BEFORE_COMPAT_VERSION if "_PATH_GRID_CLASSES" in dict_: - cls._PATH_GRID_CLASSES = str(dict_["_PATH_GRID_CLASSES"]) + if dict_["_PATH_GRID_CLASSES"] is not None: + cls._PATH_GRID_CLASSES = str(dict_["_PATH_GRID_CLASSES"]) + else: + cls._PATH_GRID_CLASSES = None elif "_PATH_ENV" in dict_: # legacy mode in grid2op <= 1.10.1 this was saved in "PATH_ENV" - cls._PATH_GRID_CLASSES = str(dict_["_PATH_ENV"]) + if dict_["_PATH_ENV"] is not None: + cls._PATH_GRID_CLASSES = str(dict_["_PATH_ENV"]) + else: + cls._PATH_GRID_CLASSES = None else: cls._PATH_GRID_CLASSES = None @@ -4849,11 +4939,11 @@ class {cls.__name__}({cls._INIT_GRID_CLS.__name__}): # name of the objects env_name = "{cls.env_name}" - name_load = np.array([{name_load_str}]) - name_gen = np.array([{name_gen_str}]) - name_line = np.array([{name_line_str}]) - name_sub = np.array([{name_sub_str}]) - name_storage = np.array([{name_storage_str}]) + name_load = np.array([{name_load_str}], dtype=str) + name_gen = np.array([{name_gen_str}], dtype=str) + name_line = np.array([{name_line_str}], dtype=str) + name_sub = np.array([{name_sub_str}], dtype=str) + name_storage = np.array([{name_storage_str}], dtype=str) n_busbar_per_sub = {cls.n_busbar_per_sub} n_gen = {cls.n_gen} @@ -4917,7 +5007,7 @@ class {cls.__name__}({cls._INIT_GRID_CLS.__name__}): gen_renewable = {gen_renewable_str} # storage unit static data - 
storage_type = np.array([{storage_type_str}]) + storage_type = np.array([{storage_type_str}], dtype=str) storage_Emax = {storage_Emax_str} storage_Emin = {storage_Emin_str} storage_max_p_prod = {storage_max_p_prod_str} @@ -4947,7 +5037,7 @@ class {cls.__name__}({cls._INIT_GRID_CLS.__name__}): alarms_area_lines = {alarms_area_lines_str} # alert feature - dim_alert = {cls.dim_alerts} + dim_alerts = {cls.dim_alerts} alertable_line_names = {alertable_line_names_str} alertable_line_ids = {alertable_line_ids_str} diff --git a/grid2op/Space/SerializableSpace.py b/grid2op/Space/SerializableSpace.py index a19a57b5a..379743169 100644 --- a/grid2op/Space/SerializableSpace.py +++ b/grid2op/Space/SerializableSpace.py @@ -61,7 +61,7 @@ class SerializableSpace(GridObjects, RandomObject): """ - def __init__(self, gridobj, subtype=object, _init_grid=True): + def __init__(self, gridobj, subtype=object, _init_grid=True, _local_dir_cls=None): """ subtype: ``type`` @@ -83,7 +83,7 @@ def __init__(self, gridobj, subtype=object, _init_grid=True): RandomObject.__init__(self) self._init_subtype = subtype # do not use, use to save restore only !!! 
if _init_grid: - self.subtype = subtype.init_grid(gridobj) + self.subtype = subtype.init_grid(gridobj, _local_dir_cls=_local_dir_cls) from grid2op.Action import ( BaseAction, ) # lazy loading to prevent circular reference @@ -185,7 +185,8 @@ def from_dict(dict_): gridobj = GridObjects.from_dict(dict_) actionClass_str = extract_from_dict(dict_, "_init_subtype", str) actionClass_li = actionClass_str.split(".") - + _local_dir_cls = None # TODO when reading back the data + if actionClass_li[-1] in globals(): subtype = globals()[actionClass_li[-1]] else: @@ -265,8 +266,8 @@ def from_dict(dict_): msg_err_ = msg_err_.format(actionClass_str) raise Grid2OpException(msg_err_) # create the proper SerializableSpace class for this environment - CLS = SerializableSpace.init_grid(gridobj) - res = CLS(gridobj=gridobj, subtype=subtype, _init_grid=True) + CLS = SerializableSpace.init_grid(gridobj, _local_dir_cls=_local_dir_cls) + res = CLS(gridobj=gridobj, subtype=subtype, _init_grid=True, _local_dir_cls=_local_dir_cls) return res def cls_to_dict(self): diff --git a/grid2op/VoltageControler/BaseVoltageController.py b/grid2op/VoltageControler/BaseVoltageController.py index 02eb6c978..e29fc883f 100644 --- a/grid2op/VoltageControler/BaseVoltageController.py +++ b/grid2op/VoltageControler/BaseVoltageController.py @@ -23,7 +23,7 @@ class BaseVoltageController(RandomObject, ABC): If the voltages are not on the chronics (missing files), it will not change the voltage setpoints at all. 
""" - def __init__(self, gridobj, controler_backend, actionSpace_cls): + def __init__(self, gridobj, controler_backend, actionSpace_cls, _local_dir_cls=None): """ Parameters @@ -39,7 +39,10 @@ def __init__(self, gridobj, controler_backend, actionSpace_cls): legal_act = AlwaysLegal() self._actionSpace_cls = actionSpace_cls self.action_space = actionSpace_cls( - gridobj=gridobj, actionClass=VoltageOnlyAction, legal_action=legal_act + gridobj=gridobj, + actionClass=VoltageOnlyAction, + legal_action=legal_act, + _local_dir_cls=_local_dir_cls ) def _custom_deepcopy_for_copy(self, new_obj): diff --git a/grid2op/VoltageControler/ControlVoltageFromFile.py b/grid2op/VoltageControler/ControlVoltageFromFile.py index ed6004842..3322eafe0 100644 --- a/grid2op/VoltageControler/ControlVoltageFromFile.py +++ b/grid2op/VoltageControler/ControlVoltageFromFile.py @@ -19,7 +19,11 @@ class ControlVoltageFromFile(BaseVoltageController): If the voltages are not on the chronics (missing files), it will not change the voltage setpoint at all. 
""" - def __init__(self, gridobj, controler_backend, actionSpace_cls): + def __init__(self, + gridobj, + controler_backend, + actionSpace_cls, + _local_dir_cls=None): """ Parameters @@ -36,6 +40,7 @@ def __init__(self, gridobj, controler_backend, actionSpace_cls): gridobj=gridobj, controler_backend=controler_backend, actionSpace_cls=actionSpace_cls, + _local_dir_cls=_local_dir_cls ) def fix_voltage(self, observation, agent_action, env_action, prod_v_chronics): diff --git a/grid2op/__init__.py b/grid2op/__init__.py index c2c9e6a0a..32bbc6599 100644 --- a/grid2op/__init__.py +++ b/grid2op/__init__.py @@ -11,7 +11,7 @@ Grid2Op """ -__version__ = '1.10.2' +__version__ = '1.10.3.dev1' __all__ = [ "Action", diff --git a/grid2op/data/educ_case14_redisp/__init__.py b/grid2op/data/educ_case14_redisp/__init__.py new file mode 100644 index 000000000..bd6582d7e --- /dev/null +++ b/grid2op/data/educ_case14_redisp/__init__.py @@ -0,0 +1 @@ +# DO NOT REMOVE, automatically generated by grid2op \ No newline at end of file diff --git a/grid2op/data/educ_case14_storage/__init__.py b/grid2op/data/educ_case14_storage/__init__.py new file mode 100644 index 000000000..bd6582d7e --- /dev/null +++ b/grid2op/data/educ_case14_storage/__init__.py @@ -0,0 +1 @@ +# DO NOT REMOVE, automatically generated by grid2op \ No newline at end of file diff --git a/grid2op/data/l2rpn_case14_sandbox/__init__.py b/grid2op/data/l2rpn_case14_sandbox/__init__.py new file mode 100644 index 000000000..bd6582d7e --- /dev/null +++ b/grid2op/data/l2rpn_case14_sandbox/__init__.py @@ -0,0 +1 @@ +# DO NOT REMOVE, automatically generated by grid2op \ No newline at end of file diff --git a/grid2op/data/l2rpn_case14_sandbox_diff_grid/__init__.py b/grid2op/data/l2rpn_case14_sandbox_diff_grid/__init__.py new file mode 100644 index 000000000..bd6582d7e --- /dev/null +++ b/grid2op/data/l2rpn_case14_sandbox_diff_grid/__init__.py @@ -0,0 +1 @@ +# DO NOT REMOVE, automatically generated by grid2op \ No newline at end of file 
diff --git a/grid2op/data/l2rpn_icaps_2021/__init__.py b/grid2op/data/l2rpn_icaps_2021/__init__.py new file mode 100644 index 000000000..bd6582d7e --- /dev/null +++ b/grid2op/data/l2rpn_icaps_2021/__init__.py @@ -0,0 +1 @@ +# DO NOT REMOVE, automatically generated by grid2op \ No newline at end of file diff --git a/grid2op/data/l2rpn_idf_2023/__init__.py b/grid2op/data/l2rpn_idf_2023/__init__.py new file mode 100644 index 000000000..bd6582d7e --- /dev/null +++ b/grid2op/data/l2rpn_idf_2023/__init__.py @@ -0,0 +1 @@ +# DO NOT REMOVE, automatically generated by grid2op \ No newline at end of file diff --git a/grid2op/data/l2rpn_neurips_2020_track1/__init__.py b/grid2op/data/l2rpn_neurips_2020_track1/__init__.py new file mode 100644 index 000000000..bd6582d7e --- /dev/null +++ b/grid2op/data/l2rpn_neurips_2020_track1/__init__.py @@ -0,0 +1 @@ +# DO NOT REMOVE, automatically generated by grid2op \ No newline at end of file diff --git a/grid2op/data/l2rpn_neurips_2020_track2/x1/__init__.py b/grid2op/data/l2rpn_neurips_2020_track2/x1/__init__.py new file mode 100644 index 000000000..bd6582d7e --- /dev/null +++ b/grid2op/data/l2rpn_neurips_2020_track2/x1/__init__.py @@ -0,0 +1 @@ +# DO NOT REMOVE, automatically generated by grid2op \ No newline at end of file diff --git a/grid2op/data/l2rpn_neurips_2020_track2/x2.5/__init__.py b/grid2op/data/l2rpn_neurips_2020_track2/x2.5/__init__.py new file mode 100644 index 000000000..bd6582d7e --- /dev/null +++ b/grid2op/data/l2rpn_neurips_2020_track2/x2.5/__init__.py @@ -0,0 +1 @@ +# DO NOT REMOVE, automatically generated by grid2op \ No newline at end of file diff --git a/grid2op/data/l2rpn_wcci_2020/__init__.py b/grid2op/data/l2rpn_wcci_2020/__init__.py new file mode 100644 index 000000000..bd6582d7e --- /dev/null +++ b/grid2op/data/l2rpn_wcci_2020/__init__.py @@ -0,0 +1 @@ +# DO NOT REMOVE, automatically generated by grid2op \ No newline at end of file diff --git a/grid2op/data/l2rpn_wcci_2022_dev/__init__.py 
b/grid2op/data/l2rpn_wcci_2022_dev/__init__.py new file mode 100644 index 000000000..bd6582d7e --- /dev/null +++ b/grid2op/data/l2rpn_wcci_2022_dev/__init__.py @@ -0,0 +1 @@ +# DO NOT REMOVE, automatically generated by grid2op \ No newline at end of file diff --git a/grid2op/data/rte_case118_example/__init__.py b/grid2op/data/rte_case118_example/__init__.py new file mode 100644 index 000000000..bd6582d7e --- /dev/null +++ b/grid2op/data/rte_case118_example/__init__.py @@ -0,0 +1 @@ +# DO NOT REMOVE, automatically generated by grid2op \ No newline at end of file diff --git a/grid2op/data/rte_case14_opponent/__init__.py b/grid2op/data/rte_case14_opponent/__init__.py new file mode 100644 index 000000000..bd6582d7e --- /dev/null +++ b/grid2op/data/rte_case14_opponent/__init__.py @@ -0,0 +1 @@ +# DO NOT REMOVE, automatically generated by grid2op \ No newline at end of file diff --git a/grid2op/data/rte_case14_realistic/__init__.py b/grid2op/data/rte_case14_realistic/__init__.py new file mode 100644 index 000000000..bd6582d7e --- /dev/null +++ b/grid2op/data/rte_case14_realistic/__init__.py @@ -0,0 +1 @@ +# DO NOT REMOVE, automatically generated by grid2op \ No newline at end of file diff --git a/grid2op/data/rte_case14_redisp/__init__.py b/grid2op/data/rte_case14_redisp/__init__.py new file mode 100644 index 000000000..bd6582d7e --- /dev/null +++ b/grid2op/data/rte_case14_redisp/__init__.py @@ -0,0 +1 @@ +# DO NOT REMOVE, automatically generated by grid2op \ No newline at end of file diff --git a/grid2op/data/rte_case14_test/__init__.py b/grid2op/data/rte_case14_test/__init__.py new file mode 100644 index 000000000..bd6582d7e --- /dev/null +++ b/grid2op/data/rte_case14_test/__init__.py @@ -0,0 +1 @@ +# DO NOT REMOVE, automatically generated by grid2op \ No newline at end of file diff --git a/grid2op/data/rte_case5_example/__init__.py b/grid2op/data/rte_case5_example/__init__.py new file mode 100644 index 000000000..bd6582d7e --- /dev/null +++ 
b/grid2op/data/rte_case5_example/__init__.py @@ -0,0 +1 @@ +# DO NOT REMOVE, automatically generated by grid2op \ No newline at end of file diff --git a/grid2op/data_test/l2rpn_idf_2023_with_alert/__init__.py b/grid2op/data_test/l2rpn_idf_2023_with_alert/__init__.py new file mode 100644 index 000000000..bd6582d7e --- /dev/null +++ b/grid2op/data_test/l2rpn_idf_2023_with_alert/__init__.py @@ -0,0 +1 @@ +# DO NOT REMOVE, automatically generated by grid2op \ No newline at end of file diff --git a/grid2op/data_test/multimix/case14_002/chronics/0/hazards.csv.bz2 b/grid2op/data_test/multimix/case14_002/chronics/0/hazards.csv.bz2 new file mode 100644 index 000000000..19f4c400c Binary files /dev/null and b/grid2op/data_test/multimix/case14_002/chronics/0/hazards.csv.bz2 differ diff --git a/grid2op/data_test/multimix/case14_002/chronics/0/load_p.csv.bz2 b/grid2op/data_test/multimix/case14_002/chronics/0/load_p.csv.bz2 new file mode 100644 index 000000000..5e58c54d6 Binary files /dev/null and b/grid2op/data_test/multimix/case14_002/chronics/0/load_p.csv.bz2 differ diff --git a/grid2op/data_test/multimix/case14_002/chronics/0/load_p_forecasted.csv.bz2 b/grid2op/data_test/multimix/case14_002/chronics/0/load_p_forecasted.csv.bz2 new file mode 100644 index 000000000..afa02b2b1 Binary files /dev/null and b/grid2op/data_test/multimix/case14_002/chronics/0/load_p_forecasted.csv.bz2 differ diff --git a/grid2op/data_test/multimix/case14_002/chronics/0/load_q.csv.bz2 b/grid2op/data_test/multimix/case14_002/chronics/0/load_q.csv.bz2 new file mode 100644 index 000000000..335611c49 Binary files /dev/null and b/grid2op/data_test/multimix/case14_002/chronics/0/load_q.csv.bz2 differ diff --git a/grid2op/data_test/multimix/case14_002/chronics/0/load_q_forecasted.csv.bz2 b/grid2op/data_test/multimix/case14_002/chronics/0/load_q_forecasted.csv.bz2 new file mode 100644 index 000000000..3183c02d1 Binary files /dev/null and 
b/grid2op/data_test/multimix/case14_002/chronics/0/load_q_forecasted.csv.bz2 differ diff --git a/grid2op/data_test/multimix/case14_002/chronics/0/maintenance.csv.bz2 b/grid2op/data_test/multimix/case14_002/chronics/0/maintenance.csv.bz2 new file mode 100644 index 000000000..19f4c400c Binary files /dev/null and b/grid2op/data_test/multimix/case14_002/chronics/0/maintenance.csv.bz2 differ diff --git a/grid2op/data_test/multimix/case14_002/chronics/0/maintenance_forecasted.csv.bz2 b/grid2op/data_test/multimix/case14_002/chronics/0/maintenance_forecasted.csv.bz2 new file mode 100644 index 000000000..19f4c400c Binary files /dev/null and b/grid2op/data_test/multimix/case14_002/chronics/0/maintenance_forecasted.csv.bz2 differ diff --git a/grid2op/data_test/multimix/case14_002/chronics/0/prod_p.csv.bz2 b/grid2op/data_test/multimix/case14_002/chronics/0/prod_p.csv.bz2 new file mode 100644 index 000000000..b523af8b6 Binary files /dev/null and b/grid2op/data_test/multimix/case14_002/chronics/0/prod_p.csv.bz2 differ diff --git a/grid2op/data_test/multimix/case14_002/chronics/0/prod_p_forecasted.csv.bz2 b/grid2op/data_test/multimix/case14_002/chronics/0/prod_p_forecasted.csv.bz2 new file mode 100644 index 000000000..7ee0bdb2b Binary files /dev/null and b/grid2op/data_test/multimix/case14_002/chronics/0/prod_p_forecasted.csv.bz2 differ diff --git a/grid2op/data_test/multimix/case14_002/chronics/0/prod_v.csv.bz2 b/grid2op/data_test/multimix/case14_002/chronics/0/prod_v.csv.bz2 new file mode 100644 index 000000000..2d590080e Binary files /dev/null and b/grid2op/data_test/multimix/case14_002/chronics/0/prod_v.csv.bz2 differ diff --git a/grid2op/data_test/multimix/case14_002/chronics/0/prod_v_forecasted.csv.bz2 b/grid2op/data_test/multimix/case14_002/chronics/0/prod_v_forecasted.csv.bz2 new file mode 100644 index 000000000..b7c0d91cc Binary files /dev/null and b/grid2op/data_test/multimix/case14_002/chronics/0/prod_v_forecasted.csv.bz2 differ diff --git 
a/grid2op/data_test/multimix/case14_002/chronics/000/start_datetime.info b/grid2op/data_test/multimix/case14_002/chronics/0/start_datetime.info similarity index 100% rename from grid2op/data_test/multimix/case14_002/chronics/000/start_datetime.info rename to grid2op/data_test/multimix/case14_002/chronics/0/start_datetime.info diff --git a/grid2op/data_test/multimix/case14_002/chronics/000/time_interval.info b/grid2op/data_test/multimix/case14_002/chronics/0/time_interval.info similarity index 100% rename from grid2op/data_test/multimix/case14_002/chronics/000/time_interval.info rename to grid2op/data_test/multimix/case14_002/chronics/0/time_interval.info diff --git a/grid2op/data_test/multimix/case14_002/chronics/000/load_p.csv.bz2 b/grid2op/data_test/multimix/case14_002/chronics/000/load_p.csv.bz2 deleted file mode 100644 index 77fd7af71..000000000 Binary files a/grid2op/data_test/multimix/case14_002/chronics/000/load_p.csv.bz2 and /dev/null differ diff --git a/grid2op/data_test/multimix/case14_002/chronics/000/load_p_forecasted.csv.bz2 b/grid2op/data_test/multimix/case14_002/chronics/000/load_p_forecasted.csv.bz2 deleted file mode 100644 index ce08ec0e1..000000000 Binary files a/grid2op/data_test/multimix/case14_002/chronics/000/load_p_forecasted.csv.bz2 and /dev/null differ diff --git a/grid2op/data_test/multimix/case14_002/chronics/000/load_q.csv.bz2 b/grid2op/data_test/multimix/case14_002/chronics/000/load_q.csv.bz2 deleted file mode 100644 index b2b092db7..000000000 Binary files a/grid2op/data_test/multimix/case14_002/chronics/000/load_q.csv.bz2 and /dev/null differ diff --git a/grid2op/data_test/multimix/case14_002/chronics/000/load_q_forecasted.csv.bz2 b/grid2op/data_test/multimix/case14_002/chronics/000/load_q_forecasted.csv.bz2 deleted file mode 100644 index 631f0f40b..000000000 Binary files a/grid2op/data_test/multimix/case14_002/chronics/000/load_q_forecasted.csv.bz2 and /dev/null differ diff --git 
a/grid2op/data_test/multimix/case14_002/chronics/000/prod_p.csv.bz2 b/grid2op/data_test/multimix/case14_002/chronics/000/prod_p.csv.bz2 deleted file mode 100644 index 84bf12179..000000000 Binary files a/grid2op/data_test/multimix/case14_002/chronics/000/prod_p.csv.bz2 and /dev/null differ diff --git a/grid2op/data_test/multimix/case14_002/chronics/000/prod_p_forecasted.csv.bz2 b/grid2op/data_test/multimix/case14_002/chronics/000/prod_p_forecasted.csv.bz2 deleted file mode 100644 index 2d7ef6442..000000000 Binary files a/grid2op/data_test/multimix/case14_002/chronics/000/prod_p_forecasted.csv.bz2 and /dev/null differ diff --git a/grid2op/data_test/multimix/case14_002/chronics/000/prod_v.csv.bz2 b/grid2op/data_test/multimix/case14_002/chronics/000/prod_v.csv.bz2 deleted file mode 100644 index c300e1563..000000000 Binary files a/grid2op/data_test/multimix/case14_002/chronics/000/prod_v.csv.bz2 and /dev/null differ diff --git a/grid2op/data_test/multimix/case14_002/chronics/000/prod_v_forecasted.csv.bz2 b/grid2op/data_test/multimix/case14_002/chronics/000/prod_v_forecasted.csv.bz2 deleted file mode 100644 index 70cb99dbc..000000000 Binary files a/grid2op/data_test/multimix/case14_002/chronics/000/prod_v_forecasted.csv.bz2 and /dev/null differ diff --git a/grid2op/data_test/multimix/case14_002/chronics/001/load_p.csv.bz2 b/grid2op/data_test/multimix/case14_002/chronics/001/load_p.csv.bz2 deleted file mode 100644 index 30336696c..000000000 Binary files a/grid2op/data_test/multimix/case14_002/chronics/001/load_p.csv.bz2 and /dev/null differ diff --git a/grid2op/data_test/multimix/case14_002/chronics/001/load_p_forecasted.csv.bz2 b/grid2op/data_test/multimix/case14_002/chronics/001/load_p_forecasted.csv.bz2 deleted file mode 100644 index 5de1d99be..000000000 Binary files a/grid2op/data_test/multimix/case14_002/chronics/001/load_p_forecasted.csv.bz2 and /dev/null differ diff --git a/grid2op/data_test/multimix/case14_002/chronics/001/load_q.csv.bz2 
b/grid2op/data_test/multimix/case14_002/chronics/001/load_q.csv.bz2 deleted file mode 100644 index 17d69b9a6..000000000 Binary files a/grid2op/data_test/multimix/case14_002/chronics/001/load_q.csv.bz2 and /dev/null differ diff --git a/grid2op/data_test/multimix/case14_002/chronics/001/load_q_forecasted.csv.bz2 b/grid2op/data_test/multimix/case14_002/chronics/001/load_q_forecasted.csv.bz2 deleted file mode 100644 index 303dbf42d..000000000 Binary files a/grid2op/data_test/multimix/case14_002/chronics/001/load_q_forecasted.csv.bz2 and /dev/null differ diff --git a/grid2op/data_test/multimix/case14_002/chronics/001/prod_p.csv.bz2 b/grid2op/data_test/multimix/case14_002/chronics/001/prod_p.csv.bz2 deleted file mode 100644 index 2a1cf249d..000000000 Binary files a/grid2op/data_test/multimix/case14_002/chronics/001/prod_p.csv.bz2 and /dev/null differ diff --git a/grid2op/data_test/multimix/case14_002/chronics/001/prod_p_forecasted.csv.bz2 b/grid2op/data_test/multimix/case14_002/chronics/001/prod_p_forecasted.csv.bz2 deleted file mode 100644 index c7bc25425..000000000 Binary files a/grid2op/data_test/multimix/case14_002/chronics/001/prod_p_forecasted.csv.bz2 and /dev/null differ diff --git a/grid2op/data_test/multimix/case14_002/chronics/001/prod_v.csv.bz2 b/grid2op/data_test/multimix/case14_002/chronics/001/prod_v.csv.bz2 deleted file mode 100644 index c300e1563..000000000 Binary files a/grid2op/data_test/multimix/case14_002/chronics/001/prod_v.csv.bz2 and /dev/null differ diff --git a/grid2op/data_test/multimix/case14_002/chronics/001/prod_v_forecasted.csv.bz2 b/grid2op/data_test/multimix/case14_002/chronics/001/prod_v_forecasted.csv.bz2 deleted file mode 100644 index 70cb99dbc..000000000 Binary files a/grid2op/data_test/multimix/case14_002/chronics/001/prod_v_forecasted.csv.bz2 and /dev/null differ diff --git a/grid2op/data_test/multimix/case14_002/chronics/1/hazards.csv.bz2 b/grid2op/data_test/multimix/case14_002/chronics/1/hazards.csv.bz2 new file mode 100644 index 
000000000..19f4c400c Binary files /dev/null and b/grid2op/data_test/multimix/case14_002/chronics/1/hazards.csv.bz2 differ diff --git a/grid2op/data_test/multimix/case14_002/chronics/1/load_p.csv.bz2 b/grid2op/data_test/multimix/case14_002/chronics/1/load_p.csv.bz2 new file mode 100644 index 000000000..1fb2cfedb Binary files /dev/null and b/grid2op/data_test/multimix/case14_002/chronics/1/load_p.csv.bz2 differ diff --git a/grid2op/data_test/multimix/case14_002/chronics/1/load_p_forecasted.csv.bz2 b/grid2op/data_test/multimix/case14_002/chronics/1/load_p_forecasted.csv.bz2 new file mode 100644 index 000000000..6ec178d02 Binary files /dev/null and b/grid2op/data_test/multimix/case14_002/chronics/1/load_p_forecasted.csv.bz2 differ diff --git a/grid2op/data_test/multimix/case14_002/chronics/1/load_q.csv.bz2 b/grid2op/data_test/multimix/case14_002/chronics/1/load_q.csv.bz2 new file mode 100644 index 000000000..f398706f6 Binary files /dev/null and b/grid2op/data_test/multimix/case14_002/chronics/1/load_q.csv.bz2 differ diff --git a/grid2op/data_test/multimix/case14_002/chronics/1/load_q_forecasted.csv.bz2 b/grid2op/data_test/multimix/case14_002/chronics/1/load_q_forecasted.csv.bz2 new file mode 100644 index 000000000..8deb04b51 Binary files /dev/null and b/grid2op/data_test/multimix/case14_002/chronics/1/load_q_forecasted.csv.bz2 differ diff --git a/grid2op/data_test/multimix/case14_002/chronics/1/maintenance.csv.bz2 b/grid2op/data_test/multimix/case14_002/chronics/1/maintenance.csv.bz2 new file mode 100644 index 000000000..19f4c400c Binary files /dev/null and b/grid2op/data_test/multimix/case14_002/chronics/1/maintenance.csv.bz2 differ diff --git a/grid2op/data_test/multimix/case14_002/chronics/1/maintenance_forecasted.csv.bz2 b/grid2op/data_test/multimix/case14_002/chronics/1/maintenance_forecasted.csv.bz2 new file mode 100644 index 000000000..19f4c400c Binary files /dev/null and b/grid2op/data_test/multimix/case14_002/chronics/1/maintenance_forecasted.csv.bz2 differ 
diff --git a/grid2op/data_test/multimix/case14_002/chronics/1/prod_p.csv.bz2 b/grid2op/data_test/multimix/case14_002/chronics/1/prod_p.csv.bz2 new file mode 100644 index 000000000..c13834eb5 Binary files /dev/null and b/grid2op/data_test/multimix/case14_002/chronics/1/prod_p.csv.bz2 differ diff --git a/grid2op/data_test/multimix/case14_002/chronics/1/prod_p_forecasted.csv.bz2 b/grid2op/data_test/multimix/case14_002/chronics/1/prod_p_forecasted.csv.bz2 new file mode 100644 index 000000000..6fed9d123 Binary files /dev/null and b/grid2op/data_test/multimix/case14_002/chronics/1/prod_p_forecasted.csv.bz2 differ diff --git a/grid2op/data_test/multimix/case14_002/chronics/1/prod_v.csv.bz2 b/grid2op/data_test/multimix/case14_002/chronics/1/prod_v.csv.bz2 new file mode 100644 index 000000000..2d590080e Binary files /dev/null and b/grid2op/data_test/multimix/case14_002/chronics/1/prod_v.csv.bz2 differ diff --git a/grid2op/data_test/multimix/case14_002/chronics/1/prod_v_forecasted.csv.bz2 b/grid2op/data_test/multimix/case14_002/chronics/1/prod_v_forecasted.csv.bz2 new file mode 100644 index 000000000..b7c0d91cc Binary files /dev/null and b/grid2op/data_test/multimix/case14_002/chronics/1/prod_v_forecasted.csv.bz2 differ diff --git a/grid2op/data_test/multimix/case14_002/chronics/001/start_datetime.info b/grid2op/data_test/multimix/case14_002/chronics/1/start_datetime.info similarity index 100% rename from grid2op/data_test/multimix/case14_002/chronics/001/start_datetime.info rename to grid2op/data_test/multimix/case14_002/chronics/1/start_datetime.info diff --git a/grid2op/data_test/multimix/case14_002/chronics/001/time_interval.info b/grid2op/data_test/multimix/case14_002/chronics/1/time_interval.info similarity index 100% rename from grid2op/data_test/multimix/case14_002/chronics/001/time_interval.info rename to grid2op/data_test/multimix/case14_002/chronics/1/time_interval.info diff --git a/grid2op/data_test/multimix/case14_002/config.py 
b/grid2op/data_test/multimix/case14_002/config.py index d2e6e585c..1c34314d6 100644 --- a/grid2op/data_test/multimix/case14_002/config.py +++ b/grid2op/data_test/multimix/case14_002/config.py @@ -15,26 +15,26 @@ "grid_value_class": GridStateFromFileWithForecasts, "volagecontroler_class": None, "thermal_limits": [ - 384.900179, - 384.900179, - 380.0, - 380.0, - 157.0, - 380.0, - 380.0, - 1077.7205012, - 461.8802148, - 769.80036, - 269.4301253, - 384.900179, - 760.0, - 380.0, - 760.0, - 384.900179, - 230.9401074, - 170.79945452, - 3402.24266, - 3402.24266, + 3.84900179e02, + 3.84900179e02, + 2.28997102e05, + 2.28997102e05, + 2.28997102e05, + 1.52664735e04, + 2.28997102e05, + 3.84900179e02, + 3.84900179e02, + 1.83285800e02, + 3.84900179e02, + 3.84900179e02, + 2.28997102e05, + 2.28997102e05, + 6.93930612e04, + 3.84900179e02, + 3.84900179e02, + 2.40562612e02, + 3.40224266e03, + 3.40224266e03, ], "names_chronics_to_grid": None, } diff --git a/grid2op/data_test/multimix/case14_002/grid.json b/grid2op/data_test/multimix/case14_002/grid.json index 88699329a..27dacefd7 100644 --- a/grid2op/data_test/multimix/case14_002/grid.json +++ b/grid2op/data_test/multimix/case14_002/grid.json @@ -1,1363 +1,5 @@ { - "_module": "pandapower.auxiliary", - "_class": "pandapowerNet", - "_object": { - "bus": { - "_module": "pandas.core.frame", - "_class": "DataFrame", - "_object": 
"{\"columns\":[\"name\",\"vn_kv\",\"type\",\"zone\",\"in_service\",\"min_vm_pu\",\"max_vm_pu\"],\"index\":[0,1,2,3,4,5,6,7,8,9,10,11,12,13],\"data\":[[1,138.0,\"b\",1.0,true,0.94,1.06],[2,138.0,\"b\",1.0,true,0.94,1.06],[3,138.0,\"b\",1.0,true,0.94,1.06],[4,138.0,\"b\",1.0,true,0.94,1.06],[5,138.0,\"b\",1.0,true,0.94,1.06],[6,20.0,\"b\",1.0,true,0.94,1.06],[7,14.0,\"b\",1.0,true,0.94,1.06],[8,12.0,\"b\",1.0,true,0.94,1.06],[9,20.0,\"b\",1.0,true,0.94,1.06],[10,20.0,\"b\",1.0,true,0.94,1.06],[11,20.0,\"b\",1.0,true,0.94,1.06],[12,20.0,\"b\",1.0,true,0.94,1.06],[13,20.0,\"b\",1.0,true,0.94,1.06],[14,20.0,\"b\",1.0,true,0.94,1.06]]}", - "orient": "split", - "dtype": { - "name": "object", - "vn_kv": "float64", - "type": "object", - "zone": "object", - "in_service": "bool", - "min_vm_pu": "float64", - "max_vm_pu": "float64" - } - }, - "load": { - "_module": "pandas.core.frame", - "_class": "DataFrame", - "_object": "{\"columns\":[\"name\",\"bus\",\"p_mw\",\"q_mvar\",\"const_z_percent\",\"const_i_percent\",\"sn_mva\",\"scaling\",\"in_service\",\"type\",\"controllable\"],\"index\":[0,1,2,3,4,5,6,7,8,9,10],\"data\":[[null,1,21.699999999999999,12.699999999999999,0.0,0.0,null,1.0,true,null,false],[null,2,94.200000000000003,19.0,0.0,0.0,null,1.0,true,null,false],[null,3,47.799999999999997,-3.9,0.0,0.0,null,1.0,true,null,false],[null,4,7.6,1.6,0.0,0.0,null,1.0,true,null,false],[null,5,11.199999999999999,7.5,0.0,0.0,null,1.0,true,null,false],[null,8,29.5,16.600000000000001,0.0,0.0,null,1.0,true,null,false],[null,9,9.0,5.8,0.0,0.0,null,1.0,true,null,false],[null,10,3.5,1.8,0.0,0.0,null,1.0,true,null,false],[null,11,6.1,1.6,0.0,0.0,null,1.0,true,null,false],[null,12,13.5,5.8,0.0,0.0,null,1.0,true,null,false],[null,13,14.9,5.0,0.0,0.0,null,1.0,true,null,false]]}", - "orient": "split", - "dtype": { - "name": "object", - "bus": "uint32", - "p_mw": "float64", - "q_mvar": "float64", - "const_z_percent": "float64", - "const_i_percent": "float64", - "sn_mva": "float64", - "scaling": 
"float64", - "in_service": "bool", - "type": "object", - "controllable": "object" - } - }, - "sgen": { - "_module": "pandas.core.frame", - "_class": "DataFrame", - "_object": "{\"columns\":[\"name\",\"bus\",\"p_mw\",\"q_mvar\",\"sn_mva\",\"scaling\",\"in_service\",\"type\",\"current_source\"],\"index\":[],\"data\":[]}", - "orient": "split", - "dtype": { - "name": "object", - "bus": "int64", - "p_mw": "float64", - "q_mvar": "float64", - "sn_mva": "float64", - "scaling": "float64", - "in_service": "bool", - "type": "object", - "current_source": "bool" - } - }, - "storage": { - "_module": "pandas.core.frame", - "_class": "DataFrame", - "_object": "{\"columns\":[\"name\",\"bus\",\"p_mw\",\"q_mvar\",\"sn_mva\",\"soc_percent\",\"min_e_mwh\",\"max_e_mwh\",\"scaling\",\"in_service\",\"type\"],\"index\":[],\"data\":[]}", - "orient": "split", - "dtype": { - "name": "object", - "bus": "int64", - "p_mw": "float64", - "q_mvar": "float64", - "sn_mva": "float64", - "soc_percent": "float64", - "min_e_mwh": "float64", - "max_e_mwh": "float64", - "scaling": "float64", - "in_service": "bool", - "type": "object" - } - }, - "gen": { - "_module": "pandas.core.frame", - "_class": "DataFrame", - "_object": "{\"columns\":[\"name\",\"bus\",\"p_mw\",\"vm_pu\",\"sn_mva\",\"min_q_mvar\",\"max_q_mvar\",\"scaling\",\"slack\",\"in_service\",\"type\",\"controllable\",\"min_p_mw\",\"max_p_mw\"],\"index\":[0,1,2,3],\"data\":[[null,1,40.0,1.045,null,-40.0,50.0,1.0,false,true,null,true,0.0,140.0],[null,2,0.0,1.01,null,0.0,40.0,1.0,false,true,null,true,0.0,100.0],[null,5,0.0,1.07,null,-6.0,24.0,1.0,false,true,null,true,0.0,100.0],[null,7,0.0,1.09,null,-6.0,24.0,1.0,false,true,null,true,0.0,100.0]]}", - "orient": "split", - "dtype": { - "name": "object", - "bus": "uint32", - "p_mw": "float64", - "vm_pu": "float64", - "sn_mva": "float64", - "min_q_mvar": "float64", - "max_q_mvar": "float64", - "scaling": "float64", - "slack": "bool", - "in_service": "bool", - "type": "object", - "controllable": "bool", - 
"min_p_mw": "float64", - "max_p_mw": "float64" - } - }, - "switch": { - "_module": "pandas.core.frame", - "_class": "DataFrame", - "_object": "{\"columns\":[\"bus\",\"element\",\"et\",\"type\",\"closed\",\"name\",\"z_ohm\"],\"index\":[],\"data\":[]}", - "orient": "split", - "dtype": { - "bus": "int64", - "element": "int64", - "et": "object", - "type": "object", - "closed": "bool", - "name": "object", - "z_ohm": "float64" - } - }, - "shunt": { - "_module": "pandas.core.frame", - "_class": "DataFrame", - "_object": "{\"columns\":[\"bus\",\"name\",\"q_mvar\",\"p_mw\",\"vn_kv\",\"step\",\"max_step\",\"in_service\"],\"index\":[0],\"data\":[[8,null,-19.0,0.0,20.0,1,1,true]]}", - "orient": "split", - "dtype": { - "bus": "uint32", - "name": "object", - "q_mvar": "float64", - "p_mw": "float64", - "vn_kv": "float64", - "step": "uint32", - "max_step": "uint32", - "in_service": "bool" - } - }, - "ext_grid": { - "_module": "pandas.core.frame", - "_class": "DataFrame", - "_object": "{\"columns\":[\"name\",\"bus\",\"vm_pu\",\"va_degree\",\"in_service\",\"min_p_mw\",\"max_p_mw\",\"min_q_mvar\",\"max_q_mvar\"],\"index\":[0],\"data\":[[null,0,1.06,0.0,true,0.0,332.399999999999977,0.0,10.0]]}", - "orient": "split", - "dtype": { - "name": "object", - "bus": "uint32", - "vm_pu": "float64", - "va_degree": "float64", - "in_service": "bool", - "min_p_mw": "float64", - "max_p_mw": "float64", - "min_q_mvar": "float64", - "max_q_mvar": "float64" - } - }, - "line": { - "_module": "pandas.core.frame", - "_class": "DataFrame", - "_object": 
"{\"columns\":[\"name\",\"std_type\",\"from_bus\",\"to_bus\",\"length_km\",\"r_ohm_per_km\",\"x_ohm_per_km\",\"c_nf_per_km\",\"g_us_per_km\",\"max_i_ka\",\"df\",\"parallel\",\"type\",\"in_service\",\"max_loading_percent\"],\"index\":[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14],\"data\":[[null,null,0,1,1.0,3.6907272,11.2683348,882.522683811391971,0.0,41.418606267951418,1.0,1,\"ol\",true,100.0],[null,null,0,4,1.0,10.2894732,42.475737599999995,822.350682642433412,0.0,41.418606267951418,1.0,1,\"ol\",true,100.0],[null,null,1,2,1.0,8.948775599999999,37.701406800000001,732.092680888995574,0.0,41.418606267951418,1.0,1,\"ol\",true,100.0],[null,null,1,3,1.0,11.0664684,33.578380799999998,568.29112215127509,0.0,41.418606267951418,1.0,1,\"ol\",true,100.0],[null,null,1,4,1.0,10.845558,33.1137072,578.319789012768069,0.0,41.418606267951418,1.0,1,\"ol\",true,100.0],[null,null,2,3,1.0,12.761384400000001,32.570953199999998,213.94489304518595,0.0,41.418606267951418,1.0,1,\"ol\",true,100.0],[null,null,3,4,1.0,2.542374,8.019428400000001,0.0,0.0,41.418606267951418,1.0,1,\"ol\",true,100.0],[null,null,5,10,1.0,0.37992,0.7956,0.0,0.0,285.788383248864761,1.0,1,\"ol\",true,100.0],[null,null,5,11,1.0,0.49164,1.02324,0.0,0.0,285.788383248864761,1.0,1,\"ol\",true,100.0],[null,null,5,12,1.0,0.2646,0.52108,0.0,0.0,285.788383248864761,1.0,1,\"ol\",true,100.0],[null,null,8,9,1.0,0.12724,0.338,0.0,0.0,285.788383248864761,1.0,1,\"ol\",true,100.0],[null,null,8,13,1.0,0.50844,1.08152,0.0,0.0,285.788383248864761,1.0,1,\"ol\",true,100.0],[null,null,9,10,1.0,0.3282,0.76828,0.0,0.0,285.788383248864761,1.0,1,\"ol\",true,100.0],[null,null,11,12,1.0,0.88368,0.79952,0.0,0.0,285.788383248864761,1.0,1,\"ol\",true,100.0],[null,null,12,13,1.0,0.68372,1.39208,0.0,0.0,285.788383248864761,1.0,1,\"ol\",true,100.0]]}", - "orient": "split", - "dtype": { - "name": "object", - "std_type": "object", - "from_bus": "uint32", - "to_bus": "uint32", - "length_km": "float64", - "r_ohm_per_km": "float64", - "x_ohm_per_km": "float64", - 
"c_nf_per_km": "float64", - "g_us_per_km": "float64", - "max_i_ka": "float64", - "df": "float64", - "parallel": "uint32", - "type": "object", - "in_service": "bool", - "max_loading_percent": "float64" - } - }, - "trafo": { - "_module": "pandas.core.frame", - "_class": "DataFrame", - "_object": "{\"columns\":[\"name\",\"std_type\",\"hv_bus\",\"lv_bus\",\"sn_mva\",\"vn_hv_kv\",\"vn_lv_kv\",\"vk_percent\",\"vkr_percent\",\"pfe_kw\",\"i0_percent\",\"shift_degree\",\"tap_side\",\"tap_neutral\",\"tap_min\",\"tap_max\",\"tap_step_percent\",\"tap_step_degree\",\"tap_pos\",\"tap_phase_shifter\",\"parallel\",\"df\",\"in_service\",\"max_loading_percent\"],\"index\":[0,1,2,3,4],\"data\":[[null,null,3,6,9900.0,138.0,14.0,2070.288000000000011,0.0,0.0,0.0,0.0,\"hv\",0.0,null,null,2.200000000000002,null,-1.0,false,1,1.0,true,100.0],[null,null,3,8,9900.0,138.0,20.0,5506.181999999999789,0.0,0.0,0.0,0.0,\"hv\",0.0,null,null,3.100000000000003,null,-1.0,false,1,1.0,true,100.0],[null,null,4,5,9900.0,138.0,20.0,2494.998000000000047,0.0,0.0,0.0,0.0,\"hv\",0.0,null,null,6.799999999999995,null,-1.0,false,1,1.0,true,100.0],[null,null,6,7,9900.0,14.0,12.0,1743.884999999999991,0.0,0.0,0.0,0.0,null,null,null,null,null,null,null,false,1,1.0,true,100.0],[null,null,8,6,9900.0,20.0,14.0,1089.098999999999933,0.0,0.0,0.0,0.0,null,null,null,null,null,null,null,false,1,1.0,true,100.0]]}", - "orient": "split", - "dtype": { - "name": "object", - "std_type": "object", - "hv_bus": "uint32", - "lv_bus": "uint32", - "sn_mva": "float64", - "vn_hv_kv": "float64", - "vn_lv_kv": "float64", - "vk_percent": "float64", - "vkr_percent": "float64", - "pfe_kw": "float64", - "i0_percent": "float64", - "shift_degree": "float64", - "tap_side": "object", - "tap_neutral": "float64", - "tap_min": "float64", - "tap_max": "float64", - "tap_step_percent": "float64", - "tap_step_degree": "float64", - "tap_pos": "float64", - "tap_phase_shifter": "bool", - "parallel": "uint32", - "df": "float64", - "in_service": "bool", - 
"max_loading_percent": "float64" - } - }, - "trafo3w": { - "_module": "pandas.core.frame", - "_class": "DataFrame", - "_object": "{\"columns\":[\"name\",\"std_type\",\"hv_bus\",\"mv_bus\",\"lv_bus\",\"sn_hv_mva\",\"sn_mv_mva\",\"sn_lv_mva\",\"vn_hv_kv\",\"vn_mv_kv\",\"vn_lv_kv\",\"vk_hv_percent\",\"vk_mv_percent\",\"vk_lv_percent\",\"vkr_hv_percent\",\"vkr_mv_percent\",\"vkr_lv_percent\",\"pfe_kw\",\"i0_percent\",\"shift_mv_degree\",\"shift_lv_degree\",\"tap_side\",\"tap_neutral\",\"tap_min\",\"tap_max\",\"tap_step_percent\",\"tap_step_degree\",\"tap_pos\",\"tap_at_star_point\",\"in_service\"],\"index\":[],\"data\":[]}", - "orient": "split", - "dtype": { - "name": "object", - "std_type": "object", - "hv_bus": "uint32", - "mv_bus": "uint32", - "lv_bus": "uint32", - "sn_hv_mva": "float64", - "sn_mv_mva": "float64", - "sn_lv_mva": "float64", - "vn_hv_kv": "float64", - "vn_mv_kv": "float64", - "vn_lv_kv": "float64", - "vk_hv_percent": "float64", - "vk_mv_percent": "float64", - "vk_lv_percent": "float64", - "vkr_hv_percent": "float64", - "vkr_mv_percent": "float64", - "vkr_lv_percent": "float64", - "pfe_kw": "float64", - "i0_percent": "float64", - "shift_mv_degree": "float64", - "shift_lv_degree": "float64", - "tap_side": "object", - "tap_neutral": "int32", - "tap_min": "int32", - "tap_max": "int32", - "tap_step_percent": "float64", - "tap_step_degree": "float64", - "tap_pos": "int32", - "tap_at_star_point": "bool", - "in_service": "bool" - } - }, - "impedance": { - "_module": "pandas.core.frame", - "_class": "DataFrame", - "_object": "{\"columns\":[\"name\",\"from_bus\",\"to_bus\",\"rft_pu\",\"xft_pu\",\"rtf_pu\",\"xtf_pu\",\"sn_mva\",\"in_service\"],\"index\":[],\"data\":[]}", - "orient": "split", - "dtype": { - "name": "object", - "from_bus": "uint32", - "to_bus": "uint32", - "rft_pu": "float64", - "xft_pu": "float64", - "rtf_pu": "float64", - "xtf_pu": "float64", - "sn_mva": "float64", - "in_service": "bool" - } - }, - "dcline": { - "_module": "pandas.core.frame", - 
"_class": "DataFrame", - "_object": "{\"columns\":[\"name\",\"from_bus\",\"to_bus\",\"p_mw\",\"loss_percent\",\"loss_mw\",\"vm_from_pu\",\"vm_to_pu\",\"max_p_mw\",\"min_q_from_mvar\",\"min_q_to_mvar\",\"max_q_from_mvar\",\"max_q_to_mvar\",\"in_service\"],\"index\":[],\"data\":[]}", - "orient": "split", - "dtype": { - "name": "object", - "from_bus": "uint32", - "to_bus": "uint32", - "p_mw": "float64", - "loss_percent": "float64", - "loss_mw": "float64", - "vm_from_pu": "float64", - "vm_to_pu": "float64", - "max_p_mw": "float64", - "min_q_from_mvar": "float64", - "min_q_to_mvar": "float64", - "max_q_from_mvar": "float64", - "max_q_to_mvar": "float64", - "in_service": "bool" - } - }, - "ward": { - "_module": "pandas.core.frame", - "_class": "DataFrame", - "_object": "{\"columns\":[\"name\",\"bus\",\"ps_mw\",\"qs_mvar\",\"qz_mvar\",\"pz_mw\",\"in_service\"],\"index\":[],\"data\":[]}", - "orient": "split", - "dtype": { - "name": "object", - "bus": "uint32", - "ps_mw": "float64", - "qs_mvar": "float64", - "qz_mvar": "float64", - "pz_mw": "float64", - "in_service": "bool" - } - }, - "xward": { - "_module": "pandas.core.frame", - "_class": "DataFrame", - "_object": "{\"columns\":[\"name\",\"bus\",\"ps_mw\",\"qs_mvar\",\"qz_mvar\",\"pz_mw\",\"r_ohm\",\"x_ohm\",\"vm_pu\",\"in_service\"],\"index\":[],\"data\":[]}", - "orient": "split", - "dtype": { - "name": "object", - "bus": "uint32", - "ps_mw": "float64", - "qs_mvar": "float64", - "qz_mvar": "float64", - "pz_mw": "float64", - "r_ohm": "float64", - "x_ohm": "float64", - "vm_pu": "float64", - "in_service": "bool" - } - }, - "measurement": { - "_module": "pandas.core.frame", - "_class": "DataFrame", - "_object": "{\"columns\":[\"name\",\"measurement_type\",\"element_type\",\"element\",\"value\",\"std_dev\",\"side\"],\"index\":[],\"data\":[]}", - "orient": "split", - "dtype": { - "name": "object", - "measurement_type": "object", - "element_type": "object", - "element": "uint32", - "value": "float64", - "std_dev": "float64", - 
"side": "object" - } - }, - "pwl_cost": { - "_module": "pandas.core.frame", - "_class": "DataFrame", - "_object": "{\"columns\":[\"power_type\",\"element\",\"et\",\"points\"],\"index\":[],\"data\":[]}", - "orient": "split", - "dtype": { - "power_type": "object", - "element": "uint32", - "et": "object", - "points": "object" - } - }, - "poly_cost": { - "_module": "pandas.core.frame", - "_class": "DataFrame", - "_object": "{\"columns\":[\"element\",\"et\",\"cp0_eur\",\"cp1_eur_per_mw\",\"cp2_eur_per_mw2\",\"cq0_eur\",\"cq1_eur_per_mvar\",\"cq2_eur_per_mvar2\"],\"index\":[0,1,2,3,4],\"data\":[[0,\"ext_grid\",0.0,20.0,0.0430293,0.0,0.0,0.0],[0,\"gen\",0.0,20.0,0.25,0.0,0.0,0.0],[1,\"gen\",0.0,40.0,0.01,0.0,0.0,0.0],[2,\"gen\",0.0,40.0,0.01,0.0,0.0,0.0],[3,\"gen\",0.0,40.0,0.01,0.0,0.0,0.0]]}", - "orient": "split", - "dtype": { - "element": "uint32", - "et": "object", - "cp0_eur": "float64", - "cp1_eur_per_mw": "float64", - "cp2_eur_per_mw2": "float64", - "cq0_eur": "float64", - "cq1_eur_per_mvar": "float64", - "cq2_eur_per_mvar2": "float64" - } - }, - "controller": { - "_module": "pandas.core.frame", - "_class": "DataFrame", - "_object": "{\"columns\":[\"object\",\"in_service\",\"order\",\"level\",\"recycle\"],\"index\":[],\"data\":[]}", - "orient": "split", - "dtype": { - "object": "object", - "in_service": "bool", - "order": "float64", - "level": "object", - "recycle": "bool" - } - }, - "line_geodata": { - "_module": "pandas.core.frame", - "_class": "DataFrame", - "_object": "{\"columns\":[\"coords\"],\"index\":[],\"data\":[]}", - "orient": "split", - "dtype": { - "coords": "object" - } - }, - "bus_geodata": { - "_module": "pandas.core.frame", - "_class": "DataFrame", - "_object": "{\"columns\":[\"x\",\"y\",\"coords\"],\"index\":[],\"data\":[]}", - "orient": "split", - "dtype": { - "x": "float64", - "y": "float64", - "coords": "object" - } - }, - "version": "2.2.1", - "converged": false, - "name": "", - "f_hz": 50, - "sn_mva": 100.0, - "std_types": { - "line": { - 
"NAYY 4x50 SE": { - "c_nf_per_km": 210, - "r_ohm_per_km": 0.642, - "x_ohm_per_km": 0.083, - "max_i_ka": 0.142, - "type": "cs", - "q_mm2": 50, - "alpha": 0.00403 - }, - "NAYY 4x120 SE": { - "c_nf_per_km": 264, - "r_ohm_per_km": 0.225, - "x_ohm_per_km": 0.08, - "max_i_ka": 0.242, - "type": "cs", - "q_mm2": 120, - "alpha": 0.00403 - }, - "NAYY 4x150 SE": { - "c_nf_per_km": 261, - "r_ohm_per_km": 0.208, - "x_ohm_per_km": 0.08, - "max_i_ka": 0.27, - "type": "cs", - "q_mm2": 150, - "alpha": 0.00403 - }, - "NA2XS2Y 1x95 RM/25 12/20 kV": { - "c_nf_per_km": 216, - "r_ohm_per_km": 0.313, - "x_ohm_per_km": 0.132, - "max_i_ka": 0.252, - "type": "cs", - "q_mm2": 95, - "alpha": 0.00403 - }, - "NA2XS2Y 1x185 RM/25 12/20 kV": { - "c_nf_per_km": 273, - "r_ohm_per_km": 0.161, - "x_ohm_per_km": 0.117, - "max_i_ka": 0.362, - "type": "cs", - "q_mm2": 185, - "alpha": 0.00403 - }, - "NA2XS2Y 1x240 RM/25 12/20 kV": { - "c_nf_per_km": 304, - "r_ohm_per_km": 0.122, - "x_ohm_per_km": 0.112, - "max_i_ka": 0.421, - "type": "cs", - "q_mm2": 240, - "alpha": 0.00403 - }, - "NA2XS2Y 1x95 RM/25 6/10 kV": { - "c_nf_per_km": 315, - "r_ohm_per_km": 0.313, - "x_ohm_per_km": 0.123, - "max_i_ka": 0.249, - "type": "cs", - "q_mm2": 95, - "alpha": 0.00403 - }, - "NA2XS2Y 1x185 RM/25 6/10 kV": { - "c_nf_per_km": 406, - "r_ohm_per_km": 0.161, - "x_ohm_per_km": 0.11, - "max_i_ka": 0.358, - "type": "cs", - "q_mm2": 185, - "alpha": 0.00403 - }, - "NA2XS2Y 1x240 RM/25 6/10 kV": { - "c_nf_per_km": 456, - "r_ohm_per_km": 0.122, - "x_ohm_per_km": 0.105, - "max_i_ka": 0.416, - "type": "cs", - "q_mm2": 240, - "alpha": 0.00403 - }, - "NA2XS2Y 1x150 RM/25 12/20 kV": { - "c_nf_per_km": 250, - "r_ohm_per_km": 0.206, - "x_ohm_per_km": 0.116, - "max_i_ka": 0.319, - "type": "cs", - "q_mm2": 150, - "alpha": 0.00403 - }, - "NA2XS2Y 1x120 RM/25 12/20 kV": { - "c_nf_per_km": 230, - "r_ohm_per_km": 0.253, - "x_ohm_per_km": 0.119, - "max_i_ka": 0.283, - "type": "cs", - "q_mm2": 120, - "alpha": 0.00403 - }, - "NA2XS2Y 1x70 RM/25 
12/20 kV": { - "c_nf_per_km": 190, - "r_ohm_per_km": 0.443, - "x_ohm_per_km": 0.132, - "max_i_ka": 0.22, - "type": "cs", - "q_mm2": 70, - "alpha": 0.00403 - }, - "NA2XS2Y 1x150 RM/25 6/10 kV": { - "c_nf_per_km": 360, - "r_ohm_per_km": 0.206, - "x_ohm_per_km": 0.11, - "max_i_ka": 0.315, - "type": "cs", - "q_mm2": 150, - "alpha": 0.00403 - }, - "NA2XS2Y 1x120 RM/25 6/10 kV": { - "c_nf_per_km": 340, - "r_ohm_per_km": 0.253, - "x_ohm_per_km": 0.113, - "max_i_ka": 0.28, - "type": "cs", - "q_mm2": 120, - "alpha": 0.00403 - }, - "NA2XS2Y 1x70 RM/25 6/10 kV": { - "c_nf_per_km": 280, - "r_ohm_per_km": 0.443, - "x_ohm_per_km": 0.123, - "max_i_ka": 0.217, - "type": "cs", - "q_mm2": 70, - "alpha": 0.00403 - }, - "N2XS(FL)2Y 1x120 RM/35 64/110 kV": { - "c_nf_per_km": 112, - "r_ohm_per_km": 0.153, - "x_ohm_per_km": 0.166, - "max_i_ka": 0.366, - "type": "cs", - "q_mm2": 120, - "alpha": 0.00393 - }, - "N2XS(FL)2Y 1x185 RM/35 64/110 kV": { - "c_nf_per_km": 125, - "r_ohm_per_km": 0.099, - "x_ohm_per_km": 0.156, - "max_i_ka": 0.457, - "type": "cs", - "q_mm2": 185, - "alpha": 0.00393 - }, - "N2XS(FL)2Y 1x240 RM/35 64/110 kV": { - "c_nf_per_km": 135, - "r_ohm_per_km": 0.075, - "x_ohm_per_km": 0.149, - "max_i_ka": 0.526, - "type": "cs", - "q_mm2": 240, - "alpha": 0.00393 - }, - "N2XS(FL)2Y 1x300 RM/35 64/110 kV": { - "c_nf_per_km": 144, - "r_ohm_per_km": 0.06, - "x_ohm_per_km": 0.144, - "max_i_ka": 0.588, - "type": "cs", - "q_mm2": 300, - "alpha": 0.00393 - }, - "15-AL1/3-ST1A 0.4": { - "c_nf_per_km": 11, - "r_ohm_per_km": 1.8769, - "x_ohm_per_km": 0.35, - "max_i_ka": 0.105, - "type": "ol", - "q_mm2": 16, - "alpha": 0.00403 - }, - "24-AL1/4-ST1A 0.4": { - "c_nf_per_km": 11.25, - "r_ohm_per_km": 1.2012, - "x_ohm_per_km": 0.335, - "max_i_ka": 0.14, - "type": "ol", - "q_mm2": 24, - "alpha": 0.00403 - }, - "48-AL1/8-ST1A 0.4": { - "c_nf_per_km": 12.2, - "r_ohm_per_km": 0.5939, - "x_ohm_per_km": 0.3, - "max_i_ka": 0.21, - "type": "ol", - "q_mm2": 48, - "alpha": 0.00403 - }, - "94-AL1/15-ST1A 
0.4": { - "c_nf_per_km": 13.2, - "r_ohm_per_km": 0.306, - "x_ohm_per_km": 0.29, - "max_i_ka": 0.35, - "type": "ol", - "q_mm2": 94, - "alpha": 0.00403 - }, - "34-AL1/6-ST1A 10.0": { - "c_nf_per_km": 9.7, - "r_ohm_per_km": 0.8342, - "x_ohm_per_km": 0.36, - "max_i_ka": 0.17, - "type": "ol", - "q_mm2": 34, - "alpha": 0.00403 - }, - "48-AL1/8-ST1A 10.0": { - "c_nf_per_km": 10.1, - "r_ohm_per_km": 0.5939, - "x_ohm_per_km": 0.35, - "max_i_ka": 0.21, - "type": "ol", - "q_mm2": 48, - "alpha": 0.00403 - }, - "70-AL1/11-ST1A 10.0": { - "c_nf_per_km": 10.4, - "r_ohm_per_km": 0.4132, - "x_ohm_per_km": 0.339, - "max_i_ka": 0.29, - "type": "ol", - "q_mm2": 70, - "alpha": 0.00403 - }, - "94-AL1/15-ST1A 10.0": { - "c_nf_per_km": 10.75, - "r_ohm_per_km": 0.306, - "x_ohm_per_km": 0.33, - "max_i_ka": 0.35, - "type": "ol", - "q_mm2": 94, - "alpha": 0.00403 - }, - "122-AL1/20-ST1A 10.0": { - "c_nf_per_km": 11.1, - "r_ohm_per_km": 0.2376, - "x_ohm_per_km": 0.323, - "max_i_ka": 0.41, - "type": "ol", - "q_mm2": 122, - "alpha": 0.00403 - }, - "149-AL1/24-ST1A 10.0": { - "c_nf_per_km": 11.25, - "r_ohm_per_km": 0.194, - "x_ohm_per_km": 0.315, - "max_i_ka": 0.47, - "type": "ol", - "q_mm2": 149, - "alpha": 0.00403 - }, - "34-AL1/6-ST1A 20.0": { - "c_nf_per_km": 9.15, - "r_ohm_per_km": 0.8342, - "x_ohm_per_km": 0.382, - "max_i_ka": 0.17, - "type": "ol", - "q_mm2": 34, - "alpha": 0.00403 - }, - "48-AL1/8-ST1A 20.0": { - "c_nf_per_km": 9.5, - "r_ohm_per_km": 0.5939, - "x_ohm_per_km": 0.372, - "max_i_ka": 0.21, - "type": "ol", - "q_mm2": 48, - "alpha": 0.00403 - }, - "70-AL1/11-ST1A 20.0": { - "c_nf_per_km": 9.7, - "r_ohm_per_km": 0.4132, - "x_ohm_per_km": 0.36, - "max_i_ka": 0.29, - "type": "ol", - "q_mm2": 70, - "alpha": 0.00403 - }, - "94-AL1/15-ST1A 20.0": { - "c_nf_per_km": 10, - "r_ohm_per_km": 0.306, - "x_ohm_per_km": 0.35, - "max_i_ka": 0.35, - "type": "ol", - "q_mm2": 94, - "alpha": 0.00403 - }, - "122-AL1/20-ST1A 20.0": { - "c_nf_per_km": 10.3, - "r_ohm_per_km": 0.2376, - "x_ohm_per_km": 
0.344, - "max_i_ka": 0.41, - "type": "ol", - "q_mm2": 122, - "alpha": 0.00403 - }, - "149-AL1/24-ST1A 20.0": { - "c_nf_per_km": 10.5, - "r_ohm_per_km": 0.194, - "x_ohm_per_km": 0.337, - "max_i_ka": 0.47, - "type": "ol", - "q_mm2": 149, - "alpha": 0.00403 - }, - "184-AL1/30-ST1A 20.0": { - "c_nf_per_km": 10.75, - "r_ohm_per_km": 0.1571, - "x_ohm_per_km": 0.33, - "max_i_ka": 0.535, - "type": "ol", - "q_mm2": 184, - "alpha": 0.00403 - }, - "243-AL1/39-ST1A 20.0": { - "c_nf_per_km": 11, - "r_ohm_per_km": 0.1188, - "x_ohm_per_km": 0.32, - "max_i_ka": 0.645, - "type": "ol", - "q_mm2": 243, - "alpha": 0.00403 - }, - "48-AL1/8-ST1A 110.0": { - "c_nf_per_km": 8, - "r_ohm_per_km": 0.5939, - "x_ohm_per_km": 0.46, - "max_i_ka": 0.21, - "type": "ol", - "q_mm2": 48, - "alpha": 0.00403 - }, - "70-AL1/11-ST1A 110.0": { - "c_nf_per_km": 8.4, - "r_ohm_per_km": 0.4132, - "x_ohm_per_km": 0.45, - "max_i_ka": 0.29, - "type": "ol", - "q_mm2": 70, - "alpha": 0.00403 - }, - "94-AL1/15-ST1A 110.0": { - "c_nf_per_km": 8.65, - "r_ohm_per_km": 0.306, - "x_ohm_per_km": 0.44, - "max_i_ka": 0.35, - "type": "ol", - "q_mm2": 94, - "alpha": 0.00403 - }, - "122-AL1/20-ST1A 110.0": { - "c_nf_per_km": 8.5, - "r_ohm_per_km": 0.2376, - "x_ohm_per_km": 0.43, - "max_i_ka": 0.41, - "type": "ol", - "q_mm2": 122, - "alpha": 0.00403 - }, - "149-AL1/24-ST1A 110.0": { - "c_nf_per_km": 8.75, - "r_ohm_per_km": 0.194, - "x_ohm_per_km": 0.41, - "max_i_ka": 0.47, - "type": "ol", - "q_mm2": 149, - "alpha": 0.00403 - }, - "184-AL1/30-ST1A 110.0": { - "c_nf_per_km": 8.8, - "r_ohm_per_km": 0.1571, - "x_ohm_per_km": 0.4, - "max_i_ka": 0.535, - "type": "ol", - "q_mm2": 184, - "alpha": 0.00403 - }, - "243-AL1/39-ST1A 110.0": { - "c_nf_per_km": 9, - "r_ohm_per_km": 0.1188, - "x_ohm_per_km": 0.39, - "max_i_ka": 0.645, - "type": "ol", - "q_mm2": 243, - "alpha": 0.00403 - }, - "305-AL1/39-ST1A 110.0": { - "c_nf_per_km": 9.2, - "r_ohm_per_km": 0.0949, - "x_ohm_per_km": 0.38, - "max_i_ka": 0.74, - "type": "ol", - "q_mm2": 305, - 
"alpha": 0.00403 - }, - "490-AL1/64-ST1A 110.0": { - "c_nf_per_km": 9.75, - "r_ohm_per_km": 0.059, - "x_ohm_per_km": 0.37, - "max_i_ka": 0.96, - "type": "ol", - "q_mm2": 490, - "alpha": 0.00403 - }, - "679-AL1/86-ST1A 110.0": { - "c_nf_per_km": 9.95, - "r_ohm_per_km": 0.042, - "x_ohm_per_km": 0.36, - "max_i_ka": 1.15, - "type": "ol", - "q_mm2": 679, - "alpha": 0.00403 - }, - "490-AL1/64-ST1A 220.0": { - "c_nf_per_km": 10, - "r_ohm_per_km": 0.059, - "x_ohm_per_km": 0.285, - "max_i_ka": 0.96, - "type": "ol", - "q_mm2": 490, - "alpha": 0.00403 - }, - "679-AL1/86-ST1A 220.0": { - "c_nf_per_km": 11.7, - "r_ohm_per_km": 0.042, - "x_ohm_per_km": 0.275, - "max_i_ka": 1.15, - "type": "ol", - "q_mm2": 679, - "alpha": 0.00403 - }, - "490-AL1/64-ST1A 380.0": { - "c_nf_per_km": 11, - "r_ohm_per_km": 0.059, - "x_ohm_per_km": 0.253, - "max_i_ka": 0.96, - "type": "ol", - "q_mm2": 490, - "alpha": 0.00403 - }, - "679-AL1/86-ST1A 380.0": { - "c_nf_per_km": 14.6, - "r_ohm_per_km": 0.042, - "x_ohm_per_km": 0.25, - "max_i_ka": 1.15, - "type": "ol", - "q_mm2": 679, - "alpha": 0.00403 - } - }, - "trafo": { - "160 MVA 380/110 kV": { - "i0_percent": 0.06, - "pfe_kw": 60, - "vkr_percent": 0.25, - "sn_mva": 160, - "vn_lv_kv": 110.0, - "vn_hv_kv": 380.0, - "vk_percent": 12.2, - "shift_degree": 0, - "vector_group": "Yy0", - "tap_side": "hv", - "tap_neutral": 0, - "tap_min": -9, - "tap_max": 9, - "tap_step_degree": 0, - "tap_step_percent": 1.5, - "tap_phase_shifter": false - }, - "100 MVA 220/110 kV": { - "i0_percent": 0.06, - "pfe_kw": 55, - "vkr_percent": 0.26, - "sn_mva": 100, - "vn_lv_kv": 110.0, - "vn_hv_kv": 220.0, - "vk_percent": 12.0, - "shift_degree": 0, - "vector_group": "Yy0", - "tap_side": "hv", - "tap_neutral": 0, - "tap_min": -9, - "tap_max": 9, - "tap_step_degree": 0, - "tap_step_percent": 1.5, - "tap_phase_shifter": false - }, - "63 MVA 110/20 kV": { - "i0_percent": 0.04, - "pfe_kw": 22, - "vkr_percent": 0.32, - "sn_mva": 63, - "vn_lv_kv": 20.0, - "vn_hv_kv": 110.0, - 
"vk_percent": 18, - "shift_degree": 150, - "vector_group": "YNd5", - "tap_side": "hv", - "tap_neutral": 0, - "tap_min": -9, - "tap_max": 9, - "tap_step_degree": 0, - "tap_step_percent": 1.5, - "tap_phase_shifter": false - }, - "40 MVA 110/20 kV": { - "i0_percent": 0.05, - "pfe_kw": 18, - "vkr_percent": 0.34, - "sn_mva": 40, - "vn_lv_kv": 20.0, - "vn_hv_kv": 110.0, - "vk_percent": 16.2, - "shift_degree": 150, - "vector_group": "YNd5", - "tap_side": "hv", - "tap_neutral": 0, - "tap_min": -9, - "tap_max": 9, - "tap_step_degree": 0, - "tap_step_percent": 1.5, - "tap_phase_shifter": false - }, - "25 MVA 110/20 kV": { - "i0_percent": 0.07, - "pfe_kw": 14, - "vkr_percent": 0.41, - "sn_mva": 25, - "vn_lv_kv": 20.0, - "vn_hv_kv": 110.0, - "vk_percent": 12, - "shift_degree": 150, - "vector_group": "YNd5", - "tap_side": "hv", - "tap_neutral": 0, - "tap_min": -9, - "tap_max": 9, - "tap_step_degree": 0, - "tap_step_percent": 1.5, - "tap_phase_shifter": false - }, - "63 MVA 110/10 kV": { - "sn_mva": 63, - "vn_hv_kv": 110, - "vn_lv_kv": 10, - "vk_percent": 18, - "vkr_percent": 0.32, - "pfe_kw": 22, - "i0_percent": 0.04, - "shift_degree": 150, - "vector_group": "YNd5", - "tap_side": "hv", - "tap_neutral": 0, - "tap_min": -9, - "tap_max": 9, - "tap_step_degree": 0, - "tap_step_percent": 1.5, - "tap_phase_shifter": false - }, - "40 MVA 110/10 kV": { - "sn_mva": 40, - "vn_hv_kv": 110, - "vn_lv_kv": 10, - "vk_percent": 16.2, - "vkr_percent": 0.34, - "pfe_kw": 18, - "i0_percent": 0.05, - "shift_degree": 150, - "vector_group": "YNd5", - "tap_side": "hv", - "tap_neutral": 0, - "tap_min": -9, - "tap_max": 9, - "tap_step_degree": 0, - "tap_step_percent": 1.5, - "tap_phase_shifter": false - }, - "25 MVA 110/10 kV": { - "sn_mva": 25, - "vn_hv_kv": 110, - "vn_lv_kv": 10, - "vk_percent": 12, - "vkr_percent": 0.41, - "pfe_kw": 14, - "i0_percent": 0.07, - "shift_degree": 150, - "vector_group": "YNd5", - "tap_side": "hv", - "tap_neutral": 0, - "tap_min": -9, - "tap_max": 9, - "tap_step_degree": 
0, - "tap_step_percent": 1.5, - "tap_phase_shifter": false - }, - "0.25 MVA 20/0.4 kV": { - "sn_mva": 0.25, - "vn_hv_kv": 20, - "vn_lv_kv": 0.4, - "vk_percent": 6, - "vkr_percent": 1.44, - "pfe_kw": 0.8, - "i0_percent": 0.32, - "shift_degree": 150, - "vector_group": "Yzn5", - "tap_side": "hv", - "tap_neutral": 0, - "tap_min": -2, - "tap_max": 2, - "tap_step_degree": 0, - "tap_step_percent": 2.5, - "tap_phase_shifter": false - }, - "0.4 MVA 20/0.4 kV": { - "sn_mva": 0.4, - "vn_hv_kv": 20, - "vn_lv_kv": 0.4, - "vk_percent": 6, - "vkr_percent": 1.425, - "pfe_kw": 1.35, - "i0_percent": 0.3375, - "shift_degree": 150, - "vector_group": "Dyn5", - "tap_side": "hv", - "tap_neutral": 0, - "tap_min": -2, - "tap_max": 2, - "tap_step_degree": 0, - "tap_step_percent": 2.5, - "tap_phase_shifter": false - }, - "0.63 MVA 20/0.4 kV": { - "sn_mva": 0.63, - "vn_hv_kv": 20, - "vn_lv_kv": 0.4, - "vk_percent": 6, - "vkr_percent": 1.206, - "pfe_kw": 1.65, - "i0_percent": 0.2619, - "shift_degree": 150, - "vector_group": "Dyn5", - "tap_side": "hv", - "tap_neutral": 0, - "tap_min": -2, - "tap_max": 2, - "tap_step_degree": 0, - "tap_step_percent": 2.5, - "tap_phase_shifter": false - }, - "0.25 MVA 10/0.4 kV": { - "sn_mva": 0.25, - "vn_hv_kv": 10, - "vn_lv_kv": 0.4, - "vk_percent": 4, - "vkr_percent": 1.2, - "pfe_kw": 0.6, - "i0_percent": 0.24, - "shift_degree": 150, - "vector_group": "Dyn5", - "tap_side": "hv", - "tap_neutral": 0, - "tap_min": -2, - "tap_max": 2, - "tap_step_degree": 0, - "tap_step_percent": 2.5, - "tap_phase_shifter": false - }, - "0.4 MVA 10/0.4 kV": { - "sn_mva": 0.4, - "vn_hv_kv": 10, - "vn_lv_kv": 0.4, - "vk_percent": 4, - "vkr_percent": 1.325, - "pfe_kw": 0.95, - "i0_percent": 0.2375, - "shift_degree": 150, - "vector_group": "Dyn5", - "tap_side": "hv", - "tap_neutral": 0, - "tap_min": -2, - "tap_max": 2, - "tap_step_degree": 0, - "tap_step_percent": 2.5, - "tap_phase_shifter": false - }, - "0.63 MVA 10/0.4 kV": { - "sn_mva": 0.63, - "vn_hv_kv": 10, - "vn_lv_kv": 0.4, - 
"vk_percent": 4, - "vkr_percent": 1.0794, - "pfe_kw": 1.18, - "i0_percent": 0.1873, - "shift_degree": 150, - "vector_group": "Dyn5", - "tap_side": "hv", - "tap_neutral": 0, - "tap_min": -2, - "tap_max": 2, - "tap_step_degree": 0, - "tap_step_percent": 2.5, - "tap_phase_shifter": false - } - }, - "trafo3w": { - "63/25/38 MVA 110/20/10 kV": { - "sn_hv_mva": 63, - "sn_mv_mva": 25, - "sn_lv_mva": 38, - "vn_hv_kv": 110, - "vn_mv_kv": 20, - "vn_lv_kv": 10, - "vk_hv_percent": 10.4, - "vk_mv_percent": 10.4, - "vk_lv_percent": 10.4, - "vkr_hv_percent": 0.28, - "vkr_mv_percent": 0.32, - "vkr_lv_percent": 0.35, - "pfe_kw": 35, - "i0_percent": 0.89, - "shift_mv_degree": 0, - "shift_lv_degree": 0, - "vector_group": "YN0yn0yn0", - "tap_side": "hv", - "tap_neutral": 0, - "tap_min": -10, - "tap_max": 10, - "tap_step_percent": 1.2 - }, - "63/25/38 MVA 110/10/10 kV": { - "sn_hv_mva": 63, - "sn_mv_mva": 25, - "sn_lv_mva": 38, - "vn_hv_kv": 110, - "vn_mv_kv": 10, - "vn_lv_kv": 10, - "vk_hv_percent": 10.4, - "vk_mv_percent": 10.4, - "vk_lv_percent": 10.4, - "vkr_hv_percent": 0.28, - "vkr_mv_percent": 0.32, - "vkr_lv_percent": 0.35, - "pfe_kw": 35, - "i0_percent": 0.89, - "shift_mv_degree": 0, - "shift_lv_degree": 0, - "vector_group": "YN0yn0yn0", - "tap_side": "hv", - "tap_neutral": 0, - "tap_min": -10, - "tap_max": 10, - "tap_step_percent": 1.2 - } - } - }, - "res_bus": { - "_module": "pandas.core.frame", - "_class": "DataFrame", - "_object": "{\"columns\":[\"vm_pu\",\"va_degree\",\"p_mw\",\"q_mvar\"],\"index\":[],\"data\":[]}", - "orient": "split", - "dtype": { - "vm_pu": "float64", - "va_degree": "float64", - "p_mw": "float64", - "q_mvar": "float64" - } - }, - "res_line": { - "_module": "pandas.core.frame", - "_class": "DataFrame", - "_object": "{\"columns\":[\"p_from_mw\",\"q_from_mvar\",\"p_to_mw\",\"q_to_mvar\",\"pl_mw\",\"ql_mvar\",\"i_from_ka\",\"i_to_ka\",\"i_ka\",\"vm_from_pu\",\"va_from_degree\",\"vm_to_pu\",\"va_to_degree\",\"loading_percent\"],\"index\":[],\"data\":[]}", - 
"orient": "split", - "dtype": { - "p_from_mw": "float64", - "q_from_mvar": "float64", - "p_to_mw": "float64", - "q_to_mvar": "float64", - "pl_mw": "float64", - "ql_mvar": "float64", - "i_from_ka": "float64", - "i_to_ka": "float64", - "i_ka": "float64", - "vm_from_pu": "float64", - "va_from_degree": "float64", - "vm_to_pu": "float64", - "va_to_degree": "float64", - "loading_percent": "float64" - } - }, - "res_trafo": { - "_module": "pandas.core.frame", - "_class": "DataFrame", - "_object": "{\"columns\":[\"p_hv_mw\",\"q_hv_mvar\",\"p_lv_mw\",\"q_lv_mvar\",\"pl_mw\",\"ql_mvar\",\"i_hv_ka\",\"i_lv_ka\",\"vm_hv_pu\",\"va_hv_degree\",\"vm_lv_pu\",\"va_lv_degree\",\"loading_percent\"],\"index\":[],\"data\":[]}", - "orient": "split", - "dtype": { - "p_hv_mw": "float64", - "q_hv_mvar": "float64", - "p_lv_mw": "float64", - "q_lv_mvar": "float64", - "pl_mw": "float64", - "ql_mvar": "float64", - "i_hv_ka": "float64", - "i_lv_ka": "float64", - "vm_hv_pu": "float64", - "va_hv_degree": "float64", - "vm_lv_pu": "float64", - "va_lv_degree": "float64", - "loading_percent": "float64" - } - }, - "res_trafo3w": { - "_module": "pandas.core.frame", - "_class": "DataFrame", - "_object": "{\"columns\":[\"p_hv_mw\",\"q_hv_mvar\",\"p_mv_mw\",\"q_mv_mvar\",\"p_lv_mw\",\"q_lv_mvar\",\"pl_mw\",\"ql_mvar\",\"i_hv_ka\",\"i_mv_ka\",\"i_lv_ka\",\"vm_hv_pu\",\"va_hv_degree\",\"vm_mv_pu\",\"va_mv_degree\",\"vm_lv_pu\",\"va_lv_degree\",\"va_internal_degree\",\"vm_internal_pu\",\"loading_percent\"],\"index\":[],\"data\":[]}", - "orient": "split", - "dtype": { - "p_hv_mw": "float64", - "q_hv_mvar": "float64", - "p_mv_mw": "float64", - "q_mv_mvar": "float64", - "p_lv_mw": "float64", - "q_lv_mvar": "float64", - "pl_mw": "float64", - "ql_mvar": "float64", - "i_hv_ka": "float64", - "i_mv_ka": "float64", - "i_lv_ka": "float64", - "vm_hv_pu": "float64", - "va_hv_degree": "float64", - "vm_mv_pu": "float64", - "va_mv_degree": "float64", - "vm_lv_pu": "float64", - "va_lv_degree": "float64", - 
"va_internal_degree": "float64", - "vm_internal_pu": "float64", - "loading_percent": "float64" - } - }, - "res_impedance": { - "_module": "pandas.core.frame", - "_class": "DataFrame", - "_object": "{\"columns\":[\"p_from_mw\",\"q_from_mvar\",\"p_to_mw\",\"q_to_mvar\",\"pl_mw\",\"ql_mvar\",\"i_from_ka\",\"i_to_ka\"],\"index\":[],\"data\":[]}", - "orient": "split", - "dtype": { - "p_from_mw": "float64", - "q_from_mvar": "float64", - "p_to_mw": "float64", - "q_to_mvar": "float64", - "pl_mw": "float64", - "ql_mvar": "float64", - "i_from_ka": "float64", - "i_to_ka": "float64" - } - }, - "res_ext_grid": { - "_module": "pandas.core.frame", - "_class": "DataFrame", - "_object": "{\"columns\":[\"p_mw\",\"q_mvar\"],\"index\":[],\"data\":[]}", - "orient": "split", - "dtype": { - "p_mw": "float64", - "q_mvar": "float64" - } - }, - "res_load": { - "_module": "pandas.core.frame", - "_class": "DataFrame", - "_object": "{\"columns\":[\"p_mw\",\"q_mvar\"],\"index\":[],\"data\":[]}", - "orient": "split", - "dtype": { - "p_mw": "float64", - "q_mvar": "float64" - } - }, - "res_sgen": { - "_module": "pandas.core.frame", - "_class": "DataFrame", - "_object": "{\"columns\":[\"p_mw\",\"q_mvar\"],\"index\":[],\"data\":[]}", - "orient": "split", - "dtype": { - "p_mw": "float64", - "q_mvar": "float64" - } - }, - "res_storage": { - "_module": "pandas.core.frame", - "_class": "DataFrame", - "_object": "{\"columns\":[\"p_mw\",\"q_mvar\"],\"index\":[],\"data\":[]}", - "orient": "split", - "dtype": { - "p_mw": "float64", - "q_mvar": "float64" - } - }, - "res_shunt": { - "_module": "pandas.core.frame", - "_class": "DataFrame", - "_object": "{\"columns\":[\"p_mw\",\"q_mvar\",\"vm_pu\"],\"index\":[],\"data\":[]}", - "orient": "split", - "dtype": { - "p_mw": "float64", - "q_mvar": "float64", - "vm_pu": "float64" - } - }, - "res_gen": { - "_module": "pandas.core.frame", - "_class": "DataFrame", - "_object": "{\"columns\":[\"p_mw\",\"q_mvar\",\"va_degree\",\"vm_pu\"],\"index\":[],\"data\":[]}", - 
"orient": "split", - "dtype": { - "p_mw": "float64", - "q_mvar": "float64", - "va_degree": "float64", - "vm_pu": "float64" - } - }, - "res_ward": { - "_module": "pandas.core.frame", - "_class": "DataFrame", - "_object": "{\"columns\":[\"p_mw\",\"q_mvar\",\"vm_pu\"],\"index\":[],\"data\":[]}", - "orient": "split", - "dtype": { - "p_mw": "float64", - "q_mvar": "float64", - "vm_pu": "float64" - } - }, - "res_xward": { - "_module": "pandas.core.frame", - "_class": "DataFrame", - "_object": "{\"columns\":[\"p_mw\",\"q_mvar\",\"vm_pu\",\"va_internal_degree\",\"vm_internal_pu\"],\"index\":[],\"data\":[]}", - "orient": "split", - "dtype": { - "p_mw": "float64", - "q_mvar": "float64", - "vm_pu": "float64", - "va_internal_degree": "float64", - "vm_internal_pu": "float64" - } - }, - "res_dcline": { - "_module": "pandas.core.frame", - "_class": "DataFrame", - "_object": "{\"columns\":[\"p_from_mw\",\"q_from_mvar\",\"p_to_mw\",\"q_to_mvar\",\"pl_mw\",\"vm_from_pu\",\"va_from_degree\",\"vm_to_pu\",\"va_to_degree\"],\"index\":[],\"data\":[]}", - "orient": "split", - "dtype": { - "p_from_mw": "float64", - "q_from_mvar": "float64", - "p_to_mw": "float64", - "q_to_mvar": "float64", - "pl_mw": "float64", - "vm_from_pu": "float64", - "va_from_degree": "float64", - "vm_to_pu": "float64", - "va_to_degree": "float64" - } - }, - "user_pf_options": {} - } -} \ No newline at end of file + "_module": "pandapower.auxiliary", + "_class": "pandapowerNet", + "_object": "{\"OPF_converged\":false,\"bus\":{\n \"_module\": \"pandas.core.frame\",\n \"_class\": \"DataFrame\",\n \"_object\": 
\"{\\\"columns\\\":[\\\"in_service\\\",\\\"max_vm_pu\\\",\\\"min_vm_pu\\\",\\\"name\\\",\\\"type\\\",\\\"vn_kv\\\",\\\"zone\\\"],\\\"index\\\":[0,1,10,11,12,13,2,3,4,5,6,7,8,9],\\\"data\\\":[[true,1.06,0.94,1,\\\"b\\\",135.0,1.0],[true,1.06,0.94,2,\\\"b\\\",135.0,1.0],[true,1.06,0.94,11,\\\"b\\\",0.208,1.0],[true,1.06,0.94,12,\\\"b\\\",0.208,1.0],[true,1.06,0.94,13,\\\"b\\\",0.208,1.0],[true,1.06,0.94,14,\\\"b\\\",0.208,1.0],[true,1.06,0.94,3,\\\"b\\\",135.0,1.0],[true,1.06,0.94,4,\\\"b\\\",135.0,1.0],[true,1.06,0.94,5,\\\"b\\\",135.0,1.0],[true,1.06,0.94,6,\\\"b\\\",0.208,1.0],[true,1.06,0.94,7,\\\"b\\\",14.0,1.0],[true,1.06,0.94,8,\\\"b\\\",12.0,1.0],[true,1.06,0.94,9,\\\"b\\\",0.208,1.0],[true,1.06,0.94,10,\\\"b\\\",0.208,1.0]]}\",\n \"dtype\": {\n \"in_service\": \"bool\",\n \"max_vm_pu\": \"float64\",\n \"min_vm_pu\": \"float64\",\n \"name\": \"object\",\n \"type\": \"object\",\n \"vn_kv\": \"float64\",\n \"zone\": \"object\"\n },\n \"orient\": \"split\"\n},\"bus_geodata\":{\n \"_module\": \"pandas.core.frame\",\n \"_class\": \"DataFrame\",\n \"_object\": \"{\\\"columns\\\":[\\\"x\\\",\\\"y\\\",\\\"coords\\\"],\\\"index\\\":[0,1,10,11,12,13,2,3,4,5,6,7,8,9],\\\"data\\\":[[1.9673949894,-0.9610198739,null],[2.9779852289,-1.0412882366,null],[1.8366837619,1.0890065149,null],[2.3371166416,2.3091630377,null],[3.3094922817,2.1179802998,null],[4.3962052866,1.6847581464,null],[3.780660539,-1.6066859687,null],[3.8337344898,-0.4914657254,null],[2.6937067209,-0.095882852,null],[2.5321180205,1.2056156419,null],[4.8721406581,-0.2692952825,null],[5.9042747731,-0.5402149495,null],[4.274948799,0.5335379916,null],[3.2723067024,0.9619849305,null]]}\",\n \"dtype\": {\n \"x\": \"float64\",\n \"y\": \"float64\",\n \"coords\": \"object\"\n },\n \"orient\": \"split\"\n},\"converged\":true,\"dcline\":{\n \"_module\": \"pandas.core.frame\",\n \"_class\": \"DataFrame\",\n \"_object\": 
\"{\\\"columns\\\":[\\\"name\\\",\\\"from_bus\\\",\\\"to_bus\\\",\\\"p_mw\\\",\\\"loss_percent\\\",\\\"loss_mw\\\",\\\"vm_from_pu\\\",\\\"vm_to_pu\\\",\\\"max_p_mw\\\",\\\"min_q_from_mvar\\\",\\\"min_q_to_mvar\\\",\\\"max_q_from_mvar\\\",\\\"max_q_to_mvar\\\",\\\"in_service\\\"],\\\"index\\\":[],\\\"data\\\":[]}\",\n \"dtype\": {\n \"name\": \"object\",\n \"from_bus\": \"uint32\",\n \"to_bus\": \"uint32\",\n \"p_mw\": \"float64\",\n \"loss_percent\": \"float64\",\n \"loss_mw\": \"float64\",\n \"vm_from_pu\": \"float64\",\n \"vm_to_pu\": \"float64\",\n \"max_p_mw\": \"float64\",\n \"min_q_from_mvar\": \"float64\",\n \"min_q_to_mvar\": \"float64\",\n \"max_q_from_mvar\": \"float64\",\n \"max_q_to_mvar\": \"float64\",\n \"in_service\": \"bool\"\n },\n \"orient\": \"split\"\n},\"ext_grid\":{\n \"_module\": \"pandas.core.frame\",\n \"_class\": \"DataFrame\",\n \"_object\": \"{\\\"columns\\\":[\\\"bus\\\",\\\"in_service\\\",\\\"name\\\",\\\"va_degree\\\",\\\"vm_pu\\\",\\\"max_p_mw\\\",\\\"min_p_mw\\\",\\\"max_q_mvar\\\",\\\"min_q_mvar\\\"],\\\"index\\\":[0],\\\"data\\\":[[0,true,null,0.0,1.06,332.400000000000034,0.0,10.0,0.0]]}\",\n \"dtype\": {\n \"bus\": \"uint32\",\n \"in_service\": \"bool\",\n \"name\": \"object\",\n \"va_degree\": \"float64\",\n \"vm_pu\": \"float64\",\n \"max_p_mw\": \"float64\",\n \"min_p_mw\": \"float64\",\n \"max_q_mvar\": \"float64\",\n \"min_q_mvar\": \"float64\"\n },\n \"orient\": \"split\"\n},\"f_hz\":60,\"gen\":{\n \"_module\": \"pandas.core.frame\",\n \"_class\": \"DataFrame\",\n \"_object\": 
\"{\\\"columns\\\":[\\\"bus\\\",\\\"controllable\\\",\\\"in_service\\\",\\\"name\\\",\\\"p_mw\\\",\\\"scaling\\\",\\\"sn_mva\\\",\\\"type\\\",\\\"vm_pu\\\",\\\"slack\\\",\\\"max_p_mw\\\",\\\"min_p_mw\\\",\\\"max_q_mvar\\\",\\\"min_q_mvar\\\"],\\\"index\\\":[0,1,2,3],\\\"data\\\":[[1,true,true,null,40.0,1.0,null,null,1.045,false,140.0,0.0,50.0,-40.0],[2,true,true,null,0.0,1.0,null,null,1.01,false,100.0,0.0,40.0,0.0],[5,true,true,null,0.0,1.0,null,null,1.07,false,100.0,0.0,24.0,-6.0],[7,true,true,null,0.0,1.0,null,null,1.09,false,100.0,0.0,24.0,-6.0]]}\",\n \"dtype\": {\n \"bus\": \"uint32\",\n \"controllable\": \"bool\",\n \"in_service\": \"bool\",\n \"name\": \"object\",\n \"p_mw\": \"float64\",\n \"scaling\": \"float64\",\n \"sn_mva\": \"float64\",\n \"type\": \"object\",\n \"vm_pu\": \"float64\",\n \"slack\": \"bool\",\n \"max_p_mw\": \"float64\",\n \"min_p_mw\": \"float64\",\n \"max_q_mvar\": \"float64\",\n \"min_q_mvar\": \"float64\"\n },\n \"orient\": \"split\"\n},\"impedance\":{\n \"_module\": \"pandas.core.frame\",\n \"_class\": \"DataFrame\",\n \"_object\": \"{\\\"columns\\\":[\\\"name\\\",\\\"from_bus\\\",\\\"to_bus\\\",\\\"rft_pu\\\",\\\"xft_pu\\\",\\\"rtf_pu\\\",\\\"xtf_pu\\\",\\\"sn_mva\\\",\\\"in_service\\\"],\\\"index\\\":[],\\\"data\\\":[]}\",\n \"dtype\": {\n \"name\": \"object\",\n \"from_bus\": \"uint32\",\n \"to_bus\": \"uint32\",\n \"rft_pu\": \"float64\",\n \"xft_pu\": \"float64\",\n \"rtf_pu\": \"float64\",\n \"xtf_pu\": \"float64\",\n \"sn_mva\": \"float64\",\n \"in_service\": \"bool\"\n },\n \"orient\": \"split\"\n},\"line\":{\n \"_module\": \"pandas.core.frame\",\n \"_class\": \"DataFrame\",\n \"_object\": 
\"{\\\"columns\\\":[\\\"c_nf_per_km\\\",\\\"df\\\",\\\"from_bus\\\",\\\"g_us_per_km\\\",\\\"in_service\\\",\\\"length_km\\\",\\\"max_i_ka\\\",\\\"max_loading_percent\\\",\\\"name\\\",\\\"parallel\\\",\\\"r_ohm_per_km\\\",\\\"std_type\\\",\\\"to_bus\\\",\\\"type\\\",\\\"x_ohm_per_km\\\"],\\\"index\\\":[0,1,10,11,12,13,14,2,3,4,5,6,7,8,9],\\\"data\\\":[[768.484773228356175,1.0,0,0.0,true,1.0,42.339019740572553,100.0,null,1,3.532005,null,1,\\\"ol\\\",10.783732499999999],[716.088084144604636,1.0,0,0.0,true,1.0,42.339019740572553,100.0,null,1,9.8469675,null,4,\\\"ol\\\",40.649039999999999],[0.0,1.0,8,0.0,true,1.0,27479.65223546776906,100.0,null,1,0.0000137622784,null,9,\\\"ol\\\",0.00003655808],[0.0,1.0,8,0.0,true,1.0,27479.65223546776906,100.0,null,1,0.0000549928704,null,13,\\\"ol\\\",0.0001169772032],[0.0,1.0,9,0.0,true,1.0,27479.65223546776906,100.0,null,1,0.000035498112,null,10,\\\"ol\\\",0.0000830971648],[0.0,1.0,11,0.0,true,1.0,27479.65223546776906,100.0,null,1,0.0000955788288,null,12,\\\"ol\\\",0.0000864760832],[0.0,1.0,12,0.0,true,1.0,27479.65223546776906,100.0,null,1,0.0000739511552,null,13,\\\"ol\\\",0.0001505673728],[637.49305051897727,1.0,1,0.0,true,1.0,42.339019740572553,100.0,null,1,8.5639275,null,2,\\\"ol\\\",36.080032500000002],[494.857619124320308,1.0,1,0.0,true,1.0,42.339019740572553,100.0,null,1,10.5905475,null,3,\\\"ol\\\",32.134320000000002],[503.590400638278879,1.0,1,0.0,true,1.0,42.339019740572553,100.0,null,1,10.379137500000001,null,4,\\\"ol\\\",31.689630000000001],[186.299338964449987,1.0,2,0.0,true,1.0,42.339019740572553,100.0,null,1,12.2125725,null,3,\\\"ol\\\",31.1702175],[0.0,1.0,3,0.0,true,1.0,42.339019740572553,100.0,null,1,2.4330375,null,4,\\\"ol\\\",7.6745475],[0.0,1.0,5,0.0,true,1.0,27479.65223546776906,100.0,null,1,0.0000410921472,null,10,\\\"ol\\\",0.000086052096],[0.0,1.0,5,0.0,true,1.0,27479.65223546776906,100.0,null,1,0.0000531757824,null,11,\\\"ol\\\",0.0001106736384],[0.0,1.0,5,0.0,true,1.0,27479.65223546776906,100.0,null,1,0.0000
28619136,null,12,\\\"ol\\\",0.0000563600128]]}\",\n \"dtype\": {\n \"c_nf_per_km\": \"float64\",\n \"df\": \"float64\",\n \"from_bus\": \"uint32\",\n \"g_us_per_km\": \"float64\",\n \"in_service\": \"bool\",\n \"length_km\": \"float64\",\n \"max_i_ka\": \"float64\",\n \"max_loading_percent\": \"float64\",\n \"name\": \"object\",\n \"parallel\": \"uint32\",\n \"r_ohm_per_km\": \"float64\",\n \"std_type\": \"object\",\n \"to_bus\": \"uint32\",\n \"type\": \"object\",\n \"x_ohm_per_km\": \"float64\"\n },\n \"orient\": \"split\"\n},\"line_geodata\":{\n \"_module\": \"pandas.core.frame\",\n \"_class\": \"DataFrame\",\n \"_object\": \"{\\\"columns\\\":[\\\"coords\\\"],\\\"index\\\":[],\\\"data\\\":[]}\",\n \"dtype\": {\n \"coords\": \"object\"\n },\n \"orient\": \"split\"\n},\"load\":{\n \"_module\": \"pandas.core.frame\",\n \"_class\": \"DataFrame\",\n \"_object\": \"{\\\"columns\\\":[\\\"bus\\\",\\\"const_i_percent\\\",\\\"const_z_percent\\\",\\\"controllable\\\",\\\"in_service\\\",\\\"name\\\",\\\"p_mw\\\",\\\"q_mvar\\\",\\\"scaling\\\",\\\"sn_mva\\\",\\\"type\\\"],\\\"index\\\":[0,1,10,2,3,4,5,6,7,8,9],\\\"data\\\":[[1,0.0,0.0,false,true,null,21.699999999999999,12.699999999999999,1.0,null,null],[2,0.0,0.0,false,true,null,94.200000000000003,19.0,1.0,null,null],[13,0.0,0.0,false,true,null,14.9,5.0,1.0,null,null],[3,0.0,0.0,false,true,null,47.799999999999997,-3.9,1.0,null,null],[4,0.0,0.0,false,true,null,7.6,1.6,1.0,null,null],[5,0.0,0.0,false,true,null,11.199999999999999,7.5,1.0,null,null],[8,0.0,0.0,false,true,null,29.5,16.600000000000001,1.0,null,null],[9,0.0,0.0,false,true,null,9.0,5.8,1.0,null,null],[10,0.0,0.0,false,true,null,3.5,1.8,1.0,null,null],[11,0.0,0.0,false,true,null,6.1,1.6,1.0,null,null],[12,0.0,0.0,false,true,null,13.5,5.8,1.0,null,null]]}\",\n \"dtype\": {\n \"bus\": \"uint32\",\n \"const_i_percent\": \"float64\",\n \"const_z_percent\": \"float64\",\n \"controllable\": \"bool\",\n \"in_service\": \"bool\",\n \"name\": \"object\",\n \"p_mw\": 
\"float64\",\n \"q_mvar\": \"float64\",\n \"scaling\": \"float64\",\n \"sn_mva\": \"float64\",\n \"type\": \"object\"\n },\n \"orient\": \"split\"\n},\"measurement\":{\n \"_module\": \"pandas.core.frame\",\n \"_class\": \"DataFrame\",\n \"_object\": \"{\\\"columns\\\":[\\\"name\\\",\\\"measurement_type\\\",\\\"element_type\\\",\\\"element\\\",\\\"value\\\",\\\"std_dev\\\",\\\"side\\\"],\\\"index\\\":[],\\\"data\\\":[]}\",\n \"dtype\": {\n \"name\": \"object\",\n \"measurement_type\": \"object\",\n \"element_type\": \"object\",\n \"element\": \"uint32\",\n \"value\": \"float64\",\n \"std_dev\": \"float64\",\n \"side\": \"object\"\n },\n \"orient\": \"split\"\n},\"name\":\"\",\"poly_cost\":{\n \"_module\": \"pandas.core.frame\",\n \"_class\": \"DataFrame\",\n \"_object\": \"{\\\"columns\\\":[\\\"element\\\",\\\"et\\\",\\\"cp0_eur\\\",\\\"cp1_eur_per_mw\\\",\\\"cp2_eur_per_mw2\\\",\\\"cq0_eur\\\",\\\"cq1_eur_per_mvar\\\",\\\"cq2_eur_per_mvar2\\\"],\\\"index\\\":[0,1,2,3,4],\\\"data\\\":[[0.0,\\\"ext_grid\\\",0.0,20.0,0.0430293,0.0,0.0,0.0],[0.0,\\\"gen\\\",0.0,20.0,0.25,0.0,0.0,0.0],[1.0,\\\"gen\\\",0.0,40.0,0.01,0.0,0.0,0.0],[2.0,\\\"gen\\\",0.0,40.0,0.01,0.0,0.0,0.0],[3.0,\\\"gen\\\",0.0,40.0,0.01,0.0,0.0,0.0]]}\",\n \"dtype\": {\n \"element\": \"object\",\n \"et\": \"object\",\n \"cp0_eur\": \"float64\",\n \"cp1_eur_per_mw\": \"float64\",\n \"cp2_eur_per_mw2\": \"float64\",\n \"cq0_eur\": \"float64\",\n \"cq1_eur_per_mvar\": \"float64\",\n \"cq2_eur_per_mvar2\": \"float64\"\n },\n \"orient\": \"split\"\n},\"pwl_cost\":{\n \"_module\": \"pandas.core.frame\",\n \"_class\": \"DataFrame\",\n \"_object\": \"{\\\"columns\\\":[\\\"power_type\\\",\\\"element\\\",\\\"et\\\",\\\"points\\\"],\\\"index\\\":[],\\\"data\\\":[]}\",\n \"dtype\": {\n \"power_type\": \"object\",\n \"element\": \"object\",\n \"et\": \"object\",\n \"points\": \"object\"\n },\n \"orient\": \"split\"\n},\"res_bus\":{\n \"_module\": \"pandas.core.frame\",\n \"_class\": \"DataFrame\",\n \"_object\": 
\"{\\\"columns\\\":[\\\"vm_pu\\\",\\\"va_degree\\\",\\\"p_mw\\\",\\\"q_mvar\\\"],\\\"index\\\":[],\\\"data\\\":[]}\",\n \"dtype\": {\n \"vm_pu\": \"float64\",\n \"va_degree\": \"float64\",\n \"p_mw\": \"float64\",\n \"q_mvar\": \"float64\"\n },\n \"orient\": \"split\"\n},\"res_dcline\":{\n \"_module\": \"pandas.core.frame\",\n \"_class\": \"DataFrame\",\n \"_object\": \"{\\\"columns\\\":[\\\"p_from_mw\\\",\\\"q_from_mvar\\\",\\\"p_to_mw\\\",\\\"q_to_mvar\\\",\\\"pl_mw\\\",\\\"vm_from_pu\\\",\\\"va_from_degree\\\",\\\"vm_to_pu\\\",\\\"va_to_degree\\\"],\\\"index\\\":[],\\\"data\\\":[]}\",\n \"dtype\": {\n \"p_from_mw\": \"float64\",\n \"q_from_mvar\": \"float64\",\n \"p_to_mw\": \"float64\",\n \"q_to_mvar\": \"float64\",\n \"pl_mw\": \"float64\",\n \"vm_from_pu\": \"float64\",\n \"va_from_degree\": \"float64\",\n \"vm_to_pu\": \"float64\",\n \"va_to_degree\": \"float64\"\n },\n \"orient\": \"split\"\n},\"res_ext_grid\":{\n \"_module\": \"pandas.core.frame\",\n \"_class\": \"DataFrame\",\n \"_object\": \"{\\\"columns\\\":[\\\"p_mw\\\",\\\"q_mvar\\\"],\\\"index\\\":[0],\\\"data\\\":[[null,null]]}\",\n \"dtype\": {\n \"p_mw\": \"float64\",\n \"q_mvar\": \"float64\"\n },\n \"orient\": \"split\"\n},\"res_gen\":{\n \"_module\": \"pandas.core.frame\",\n \"_class\": \"DataFrame\",\n \"_object\": \"{\\\"columns\\\":[\\\"p_mw\\\",\\\"q_mvar\\\",\\\"va_degree\\\",\\\"vm_pu\\\"],\\\"index\\\":[0,1,2,3],\\\"data\\\":[[null,null,null,null],[null,null,null,null],[null,null,null,null],[null,null,null,null]]}\",\n \"dtype\": {\n \"p_mw\": \"float64\",\n \"q_mvar\": \"float64\",\n \"va_degree\": \"float64\",\n \"vm_pu\": \"float64\"\n },\n \"orient\": \"split\"\n},\"res_impedance\":{\n \"_module\": \"pandas.core.frame\",\n \"_class\": \"DataFrame\",\n \"_object\": \"{\\\"columns\\\":[\\\"p_from_mw\\\",\\\"q_from_mvar\\\",\\\"p_to_mw\\\",\\\"q_to_mvar\\\",\\\"pl_mw\\\",\\\"ql_mvar\\\",\\\"i_from_ka\\\",\\\"i_to_ka\\\"],\\\"index\\\":[],\\\"data\\\":[]}\",\n \"dtype\": {\n 
\"p_from_mw\": \"float64\",\n \"q_from_mvar\": \"float64\",\n \"p_to_mw\": \"float64\",\n \"q_to_mvar\": \"float64\",\n \"pl_mw\": \"float64\",\n \"ql_mvar\": \"float64\",\n \"i_from_ka\": \"float64\",\n \"i_to_ka\": \"float64\"\n },\n \"orient\": \"split\"\n},\"res_line\":{\n \"_module\": \"pandas.core.frame\",\n \"_class\": \"DataFrame\",\n \"_object\": \"{\\\"columns\\\":[\\\"p_from_mw\\\",\\\"q_from_mvar\\\",\\\"p_to_mw\\\",\\\"q_to_mvar\\\",\\\"pl_mw\\\",\\\"ql_mvar\\\",\\\"i_from_ka\\\",\\\"i_to_ka\\\",\\\"i_ka\\\",\\\"vm_from_pu\\\",\\\"va_from_degree\\\",\\\"vm_to_pu\\\",\\\"va_to_degree\\\",\\\"loading_percent\\\"],\\\"index\\\":[0,1,10,11,12,13,14,2,3,4,5,6,7,8,9],\\\"data\\\":[[null,null,null,null,null,null,null,null,null,null,null,null,null,null],[null,null,null,null,null,null,null,null,null,null,null,null,null,null],[null,null,null,null,null,null,null,null,null,null,null,null,null,null],[null,null,null,null,null,null,null,null,null,null,null,null,null,null],[null,null,null,null,null,null,null,null,null,null,null,null,null,null],[null,null,null,null,null,null,null,null,null,null,null,null,null,null],[null,null,null,null,null,null,null,null,null,null,null,null,null,null],[null,null,null,null,null,null,null,null,null,null,null,null,null,null],[null,null,null,null,null,null,null,null,null,null,null,null,null,null],[null,null,null,null,null,null,null,null,null,null,null,null,null,null],[null,null,null,null,null,null,null,null,null,null,null,null,null,null],[null,null,null,null,null,null,null,null,null,null,null,null,null,null],[null,null,null,null,null,null,null,null,null,null,null,null,null,null],[null,null,null,null,null,null,null,null,null,null,null,null,null,null],[null,null,null,null,null,null,null,null,null,null,null,null,null,null]]}\",\n \"dtype\": {\n \"p_from_mw\": \"float64\",\n \"q_from_mvar\": \"float64\",\n \"p_to_mw\": \"float64\",\n \"q_to_mvar\": \"float64\",\n \"pl_mw\": \"float64\",\n \"ql_mvar\": \"float64\",\n \"i_from_ka\": 
\"float64\",\n \"i_to_ka\": \"float64\",\n \"i_ka\": \"float64\",\n \"vm_from_pu\": \"float64\",\n \"va_from_degree\": \"float64\",\n \"vm_to_pu\": \"float64\",\n \"va_to_degree\": \"float64\",\n \"loading_percent\": \"float64\"\n },\n \"orient\": \"split\"\n},\"res_load\":{\n \"_module\": \"pandas.core.frame\",\n \"_class\": \"DataFrame\",\n \"_object\": \"{\\\"columns\\\":[\\\"p_mw\\\",\\\"q_mvar\\\"],\\\"index\\\":[0,1,10,2,3,4,5,6,7,8,9],\\\"data\\\":[[null,null],[null,null],[null,null],[null,null],[null,null],[null,null],[null,null],[null,null],[null,null],[null,null],[null,null]]}\",\n \"dtype\": {\n \"p_mw\": \"float64\",\n \"q_mvar\": \"float64\"\n },\n \"orient\": \"split\"\n},\"res_sgen\":{\n \"_module\": \"pandas.core.frame\",\n \"_class\": \"DataFrame\",\n \"_object\": \"{\\\"columns\\\":[\\\"p_mw\\\",\\\"q_mvar\\\"],\\\"index\\\":[],\\\"data\\\":[]}\",\n \"dtype\": {\n \"p_mw\": \"float64\",\n \"q_mvar\": \"float64\"\n },\n \"orient\": \"split\"\n},\"res_shunt\":{\n \"_module\": \"pandas.core.frame\",\n \"_class\": \"DataFrame\",\n \"_object\": \"{\\\"columns\\\":[\\\"p_mw\\\",\\\"q_mvar\\\",\\\"vm_pu\\\"],\\\"index\\\":[0],\\\"data\\\":[[null,null,null]]}\",\n \"dtype\": {\n \"p_mw\": \"float64\",\n \"q_mvar\": \"float64\",\n \"vm_pu\": \"float64\"\n },\n \"orient\": \"split\"\n},\"res_storage\":{\n \"_module\": \"pandas.core.frame\",\n \"_class\": \"DataFrame\",\n \"_object\": \"{\\\"columns\\\":[\\\"p_mw\\\",\\\"q_mvar\\\"],\\\"index\\\":[],\\\"data\\\":[]}\",\n \"dtype\": {\n \"p_mw\": \"float64\",\n \"q_mvar\": \"float64\"\n },\n \"orient\": \"split\"\n},\"res_trafo\":{\n \"_module\": \"pandas.core.frame\",\n \"_class\": \"DataFrame\",\n \"_object\": 
\"{\\\"columns\\\":[\\\"p_hv_mw\\\",\\\"q_hv_mvar\\\",\\\"p_lv_mw\\\",\\\"q_lv_mvar\\\",\\\"pl_mw\\\",\\\"ql_mvar\\\",\\\"i_hv_ka\\\",\\\"i_lv_ka\\\",\\\"vm_hv_pu\\\",\\\"va_hv_degree\\\",\\\"vm_lv_pu\\\",\\\"va_lv_degree\\\",\\\"loading_percent\\\"],\\\"index\\\":[0,1,2,3,4],\\\"data\\\":[[null,null,null,null,null,null,null,null,null,null,null,null,null],[null,null,null,null,null,null,null,null,null,null,null,null,null],[null,null,null,null,null,null,null,null,null,null,null,null,null],[null,null,null,null,null,null,null,null,null,null,null,null,null],[null,null,null,null,null,null,null,null,null,null,null,null,null]]}\",\n \"dtype\": {\n \"p_hv_mw\": \"float64\",\n \"q_hv_mvar\": \"float64\",\n \"p_lv_mw\": \"float64\",\n \"q_lv_mvar\": \"float64\",\n \"pl_mw\": \"float64\",\n \"ql_mvar\": \"float64\",\n \"i_hv_ka\": \"float64\",\n \"i_lv_ka\": \"float64\",\n \"vm_hv_pu\": \"float64\",\n \"va_hv_degree\": \"float64\",\n \"vm_lv_pu\": \"float64\",\n \"va_lv_degree\": \"float64\",\n \"loading_percent\": \"float64\"\n },\n \"orient\": \"split\"\n},\"res_trafo3w\":{\n \"_module\": \"pandas.core.frame\",\n \"_class\": \"DataFrame\",\n \"_object\": \"{\\\"columns\\\":[\\\"p_hv_mw\\\",\\\"q_hv_mvar\\\",\\\"p_mv_mw\\\",\\\"q_mv_mvar\\\",\\\"p_lv_mw\\\",\\\"q_lv_mvar\\\",\\\"pl_mw\\\",\\\"ql_mvar\\\",\\\"i_hv_ka\\\",\\\"i_mv_ka\\\",\\\"i_lv_ka\\\",\\\"vm_hv_pu\\\",\\\"va_hv_degree\\\",\\\"vm_mv_pu\\\",\\\"va_mv_degree\\\",\\\"vm_lv_pu\\\",\\\"va_lv_degree\\\",\\\"va_internal_degree\\\",\\\"vm_internal_pu\\\",\\\"loading_percent\\\"],\\\"index\\\":[],\\\"data\\\":[]}\",\n \"dtype\": {\n \"p_hv_mw\": \"float64\",\n \"q_hv_mvar\": \"float64\",\n \"p_mv_mw\": \"float64\",\n \"q_mv_mvar\": \"float64\",\n \"p_lv_mw\": \"float64\",\n \"q_lv_mvar\": \"float64\",\n \"pl_mw\": \"float64\",\n \"ql_mvar\": \"float64\",\n \"i_hv_ka\": \"float64\",\n \"i_mv_ka\": \"float64\",\n \"i_lv_ka\": \"float64\",\n \"vm_hv_pu\": \"float64\",\n \"va_hv_degree\": \"float64\",\n \"vm_mv_pu\": 
\"float64\",\n \"va_mv_degree\": \"float64\",\n \"vm_lv_pu\": \"float64\",\n \"va_lv_degree\": \"float64\",\n \"va_internal_degree\": \"float64\",\n \"vm_internal_pu\": \"float64\",\n \"loading_percent\": \"float64\"\n },\n \"orient\": \"split\"\n},\"res_ward\":{\n \"_module\": \"pandas.core.frame\",\n \"_class\": \"DataFrame\",\n \"_object\": \"{\\\"columns\\\":[\\\"p_mw\\\",\\\"q_mvar\\\",\\\"vm_pu\\\"],\\\"index\\\":[],\\\"data\\\":[]}\",\n \"dtype\": {\n \"p_mw\": \"float64\",\n \"q_mvar\": \"float64\",\n \"vm_pu\": \"float64\"\n },\n \"orient\": \"split\"\n},\"res_xward\":{\n \"_module\": \"pandas.core.frame\",\n \"_class\": \"DataFrame\",\n \"_object\": \"{\\\"columns\\\":[\\\"p_mw\\\",\\\"q_mvar\\\",\\\"vm_pu\\\",\\\"va_internal_degree\\\",\\\"vm_internal_pu\\\"],\\\"index\\\":[],\\\"data\\\":[]}\",\n \"dtype\": {\n \"p_mw\": \"float64\",\n \"q_mvar\": \"float64\",\n \"vm_pu\": \"float64\",\n \"va_internal_degree\": \"float64\",\n \"vm_internal_pu\": \"float64\"\n },\n \"orient\": \"split\"\n},\"sgen\":{\n \"_module\": \"pandas.core.frame\",\n \"_class\": \"DataFrame\",\n \"_object\": \"{\\\"columns\\\":[\\\"name\\\",\\\"bus\\\",\\\"p_mw\\\",\\\"q_mvar\\\",\\\"sn_mva\\\",\\\"scaling\\\",\\\"in_service\\\",\\\"type\\\",\\\"current_source\\\"],\\\"index\\\":[],\\\"data\\\":[]}\",\n \"dtype\": {\n \"name\": \"object\",\n \"bus\": \"int64\",\n \"p_mw\": \"float64\",\n \"q_mvar\": \"float64\",\n \"sn_mva\": \"float64\",\n \"scaling\": \"float64\",\n \"in_service\": \"bool\",\n \"type\": \"object\",\n \"current_source\": \"bool\"\n },\n \"orient\": \"split\"\n},\"shunt\":{\n \"_module\": \"pandas.core.frame\",\n \"_class\": \"DataFrame\",\n \"_object\": \"{\\\"columns\\\":[\\\"bus\\\",\\\"name\\\",\\\"q_mvar\\\",\\\"p_mw\\\",\\\"vn_kv\\\",\\\"step\\\",\\\"max_step\\\",\\\"in_service\\\"],\\\"index\\\":[0],\\\"data\\\":[[8,null,-19.0,0.0,0.208,1,1,true]]}\",\n \"dtype\": {\n \"bus\": \"uint32\",\n \"name\": \"object\",\n \"q_mvar\": \"float64\",\n \"p_mw\": 
\"float64\",\n \"vn_kv\": \"float64\",\n \"step\": \"uint32\",\n \"max_step\": \"uint32\",\n \"in_service\": \"bool\"\n },\n \"orient\": \"split\"\n},\"sn_mva\":1.0,\"std_types\":{\n \"line\": {\n \"NAYY 4x150 SE\": {\n \"type\": \"cs\",\n \"r_ohm_per_km\": 0.208,\n \"q_mm2\": 150,\n \"x_ohm_per_km\": 0.08,\n \"c_nf_per_km\": 261.0,\n \"max_i_ka\": 0.27\n },\n \"70-AL1/11-ST1A 20.0\": {\n \"type\": \"ol\",\n \"r_ohm_per_km\": 0.4132,\n \"q_mm2\": 70,\n \"x_ohm_per_km\": 0.36,\n \"c_nf_per_km\": 9.7,\n \"max_i_ka\": 0.29\n },\n \"NA2XS2Y 1x70 RM/25 6/10 kV\": {\n \"type\": \"cs\",\n \"r_ohm_per_km\": 0.443,\n \"q_mm2\": 70,\n \"x_ohm_per_km\": 0.123,\n \"c_nf_per_km\": 280.0,\n \"max_i_ka\": 0.217\n },\n \"N2XS(FL)2Y 1x300 RM/35 64/110 kV\": {\n \"type\": \"cs\",\n \"r_ohm_per_km\": 0.06,\n \"q_mm2\": 300,\n \"x_ohm_per_km\": 0.144,\n \"c_nf_per_km\": 144.0,\n \"max_i_ka\": 0.588\n },\n \"NA2XS2Y 1x120 RM/25 6/10 kV\": {\n \"type\": \"cs\",\n \"r_ohm_per_km\": 0.253,\n \"q_mm2\": 120,\n \"x_ohm_per_km\": 0.113,\n \"c_nf_per_km\": 340.0,\n \"max_i_ka\": 0.28\n },\n \"149-AL1/24-ST1A 10.0\": {\n \"type\": \"ol\",\n \"r_ohm_per_km\": 0.194,\n \"q_mm2\": 149,\n \"x_ohm_per_km\": 0.315,\n \"c_nf_per_km\": 11.25,\n \"max_i_ka\": 0.47\n },\n \"15-AL1/3-ST1A 0.4\": {\n \"type\": \"ol\",\n \"r_ohm_per_km\": 1.8769,\n \"q_mm2\": 16,\n \"x_ohm_per_km\": 0.35,\n \"c_nf_per_km\": 11.0,\n \"max_i_ka\": 0.105\n },\n \"NA2XS2Y 1x185 RM/25 6/10 kV\": {\n \"type\": \"cs\",\n \"r_ohm_per_km\": 0.161,\n \"q_mm2\": 185,\n \"x_ohm_per_km\": 0.11,\n \"c_nf_per_km\": 406.0,\n \"max_i_ka\": 0.358\n },\n \"NA2XS2Y 1x240 RM/25 6/10 kV\": {\n \"type\": \"cs\",\n \"r_ohm_per_km\": 0.122,\n \"q_mm2\": 240,\n \"x_ohm_per_km\": 0.105,\n \"c_nf_per_km\": 456.0,\n \"max_i_ka\": 0.416\n },\n \"N2XS(FL)2Y 1x240 RM/35 64/110 kV\": {\n \"type\": \"cs\",\n \"r_ohm_per_km\": 0.075,\n \"q_mm2\": 240,\n \"x_ohm_per_km\": 0.149,\n \"c_nf_per_km\": 135.0,\n \"max_i_ka\": 0.526\n },\n \"NAYY 4x120 SE\": {\n 
\"type\": \"cs\",\n \"r_ohm_per_km\": 0.225,\n \"q_mm2\": 120,\n \"x_ohm_per_km\": 0.08,\n \"c_nf_per_km\": 264.0,\n \"max_i_ka\": 0.242\n },\n \"48-AL1/8-ST1A 10.0\": {\n \"type\": \"ol\",\n \"r_ohm_per_km\": 0.5939,\n \"q_mm2\": 48,\n \"x_ohm_per_km\": 0.35,\n \"c_nf_per_km\": 10.1,\n \"max_i_ka\": 0.21\n },\n \"94-AL1/15-ST1A 10.0\": {\n \"type\": \"ol\",\n \"r_ohm_per_km\": 0.306,\n \"q_mm2\": 94,\n \"x_ohm_per_km\": 0.33,\n \"c_nf_per_km\": 10.75,\n \"max_i_ka\": 0.35\n },\n \"NA2XS2Y 1x70 RM/25 12/20 kV\": {\n \"type\": \"cs\",\n \"r_ohm_per_km\": 0.443,\n \"q_mm2\": 70,\n \"x_ohm_per_km\": 0.132,\n \"c_nf_per_km\": 190.0,\n \"max_i_ka\": 0.22\n },\n \"243-AL1/39-ST1A 20.0\": {\n \"type\": \"ol\",\n \"r_ohm_per_km\": 0.1188,\n \"q_mm2\": 243,\n \"x_ohm_per_km\": 0.32,\n \"c_nf_per_km\": 11.0,\n \"max_i_ka\": 0.645\n },\n \"NA2XS2Y 1x150 RM/25 6/10 kV\": {\n \"type\": \"cs\",\n \"r_ohm_per_km\": 0.206,\n \"q_mm2\": 150,\n \"x_ohm_per_km\": 0.11,\n \"c_nf_per_km\": 360.0,\n \"max_i_ka\": 0.315\n },\n \"184-AL1/30-ST1A 110.0\": {\n \"type\": \"ol\",\n \"r_ohm_per_km\": 0.1571,\n \"q_mm2\": 184,\n \"x_ohm_per_km\": 0.4,\n \"c_nf_per_km\": 8.8,\n \"max_i_ka\": 0.535\n },\n \"149-AL1/24-ST1A 110.0\": {\n \"type\": \"ol\",\n \"r_ohm_per_km\": 0.194,\n \"q_mm2\": 149,\n \"x_ohm_per_km\": 0.41,\n \"c_nf_per_km\": 8.75,\n \"max_i_ka\": 0.47\n },\n \"NA2XS2Y 1x240 RM/25 12/20 kV\": {\n \"type\": \"cs\",\n \"r_ohm_per_km\": 0.122,\n \"q_mm2\": 240,\n \"x_ohm_per_km\": 0.112,\n \"c_nf_per_km\": 304.0,\n \"max_i_ka\": 0.421\n },\n \"122-AL1/20-ST1A 20.0\": {\n \"type\": \"ol\",\n \"r_ohm_per_km\": 0.2376,\n \"q_mm2\": 122,\n \"x_ohm_per_km\": 0.344,\n \"c_nf_per_km\": 10.3,\n \"max_i_ka\": 0.41\n },\n \"48-AL1/8-ST1A 20.0\": {\n \"type\": \"ol\",\n \"r_ohm_per_km\": 0.5939,\n \"q_mm2\": 48,\n \"x_ohm_per_km\": 0.372,\n \"c_nf_per_km\": 9.5,\n \"max_i_ka\": 0.21\n },\n \"34-AL1/6-ST1A 10.0\": {\n \"type\": \"ol\",\n \"r_ohm_per_km\": 0.8342,\n \"q_mm2\": 34,\n 
\"x_ohm_per_km\": 0.36,\n \"c_nf_per_km\": 9.7,\n \"max_i_ka\": 0.17\n },\n \"24-AL1/4-ST1A 0.4\": {\n \"type\": \"ol\",\n \"r_ohm_per_km\": 1.2012,\n \"q_mm2\": 24,\n \"x_ohm_per_km\": 0.335,\n \"c_nf_per_km\": 11.25,\n \"max_i_ka\": 0.14\n },\n \"184-AL1/30-ST1A 20.0\": {\n \"type\": \"ol\",\n \"r_ohm_per_km\": 0.1571,\n \"q_mm2\": 184,\n \"x_ohm_per_km\": 0.33,\n \"c_nf_per_km\": 10.75,\n \"max_i_ka\": 0.535\n },\n \"94-AL1/15-ST1A 20.0\": {\n \"type\": \"ol\",\n \"r_ohm_per_km\": 0.306,\n \"q_mm2\": 94,\n \"x_ohm_per_km\": 0.35,\n \"c_nf_per_km\": 10.0,\n \"max_i_ka\": 0.35\n },\n \"NAYY 4x50 SE\": {\n \"type\": \"cs\",\n \"r_ohm_per_km\": 0.642,\n \"q_mm2\": 50,\n \"x_ohm_per_km\": 0.083,\n \"c_nf_per_km\": 210.0,\n \"max_i_ka\": 0.142\n },\n \"490-AL1/64-ST1A 380.0\": {\n \"type\": \"ol\",\n \"r_ohm_per_km\": 0.059,\n \"q_mm2\": 490,\n \"x_ohm_per_km\": 0.253,\n \"c_nf_per_km\": 11.0,\n \"max_i_ka\": 0.96\n },\n \"48-AL1/8-ST1A 0.4\": {\n \"type\": \"ol\",\n \"r_ohm_per_km\": 0.5939,\n \"q_mm2\": 48,\n \"x_ohm_per_km\": 0.3,\n \"c_nf_per_km\": 12.2,\n \"max_i_ka\": 0.21\n },\n \"NA2XS2Y 1x95 RM/25 6/10 kV\": {\n \"type\": \"cs\",\n \"r_ohm_per_km\": 0.313,\n \"q_mm2\": 95,\n \"x_ohm_per_km\": 0.123,\n \"c_nf_per_km\": 315.0,\n \"max_i_ka\": 0.249\n },\n \"NA2XS2Y 1x120 RM/25 12/20 kV\": {\n \"type\": \"cs\",\n \"r_ohm_per_km\": 0.253,\n \"q_mm2\": 120,\n \"x_ohm_per_km\": 0.119,\n \"c_nf_per_km\": 230.0,\n \"max_i_ka\": 0.283\n },\n \"34-AL1/6-ST1A 20.0\": {\n \"type\": \"ol\",\n \"r_ohm_per_km\": 0.8342,\n \"q_mm2\": 34,\n \"x_ohm_per_km\": 0.382,\n \"c_nf_per_km\": 9.15,\n \"max_i_ka\": 0.17\n },\n \"94-AL1/15-ST1A 0.4\": {\n \"type\": \"ol\",\n \"r_ohm_per_km\": 0.306,\n \"q_mm2\": 94,\n \"x_ohm_per_km\": 0.29,\n \"c_nf_per_km\": 13.2,\n \"max_i_ka\": 0.35\n },\n \"NA2XS2Y 1x185 RM/25 12/20 kV\": {\n \"type\": \"cs\",\n \"r_ohm_per_km\": 0.161,\n \"q_mm2\": 185,\n \"x_ohm_per_km\": 0.117,\n \"c_nf_per_km\": 273.0,\n \"max_i_ka\": 0.362\n },\n \"NA2XS2Y 
1x150 RM/25 12/20 kV\": {\n \"type\": \"cs\",\n \"r_ohm_per_km\": 0.206,\n \"q_mm2\": 150,\n \"x_ohm_per_km\": 0.116,\n \"c_nf_per_km\": 250.0,\n \"max_i_ka\": 0.319\n },\n \"243-AL1/39-ST1A 110.0\": {\n \"type\": \"ol\",\n \"r_ohm_per_km\": 0.1188,\n \"q_mm2\": 243,\n \"x_ohm_per_km\": 0.39,\n \"c_nf_per_km\": 9.0,\n \"max_i_ka\": 0.645\n },\n \"490-AL1/64-ST1A 220.0\": {\n \"type\": \"ol\",\n \"r_ohm_per_km\": 0.059,\n \"q_mm2\": 490,\n \"x_ohm_per_km\": 0.285,\n \"c_nf_per_km\": 10.0,\n \"max_i_ka\": 0.96\n },\n \"N2XS(FL)2Y 1x185 RM/35 64/110 kV\": {\n \"type\": \"cs\",\n \"r_ohm_per_km\": 0.099,\n \"q_mm2\": 185,\n \"x_ohm_per_km\": 0.156,\n \"c_nf_per_km\": 125.0,\n \"max_i_ka\": 0.457\n },\n \"N2XS(FL)2Y 1x120 RM/35 64/110 kV\": {\n \"type\": \"cs\",\n \"r_ohm_per_km\": 0.153,\n \"q_mm2\": 120,\n \"x_ohm_per_km\": 0.166,\n \"c_nf_per_km\": 112.0,\n \"max_i_ka\": 0.366\n },\n \"NA2XS2Y 1x95 RM/25 12/20 kV\": {\n \"type\": \"cs\",\n \"r_ohm_per_km\": 0.313,\n \"q_mm2\": 95,\n \"x_ohm_per_km\": 0.132,\n \"c_nf_per_km\": 216.0,\n \"max_i_ka\": 0.252\n },\n \"122-AL1/20-ST1A 10.0\": {\n \"type\": \"ol\",\n \"r_ohm_per_km\": 0.2376,\n \"q_mm2\": 122,\n \"x_ohm_per_km\": 0.323,\n \"c_nf_per_km\": 11.1,\n \"max_i_ka\": 0.41\n },\n \"149-AL1/24-ST1A 20.0\": {\n \"type\": \"ol\",\n \"r_ohm_per_km\": 0.194,\n \"q_mm2\": 149,\n \"x_ohm_per_km\": 0.337,\n \"c_nf_per_km\": 10.5,\n \"max_i_ka\": 0.47\n },\n \"70-AL1/11-ST1A 10.0\": {\n \"type\": \"ol\",\n \"r_ohm_per_km\": 0.4132,\n \"q_mm2\": 70,\n \"x_ohm_per_km\": 0.339,\n \"c_nf_per_km\": 10.4,\n \"max_i_ka\": 0.29\n },\n \"305-AL1/39-ST1A 110.0\": {\n \"type\": \"ol\",\n \"r_ohm_per_km\": 0.0949,\n \"q_mm2\": 305,\n \"x_ohm_per_km\": 0.38,\n \"c_nf_per_km\": 9.2,\n \"max_i_ka\": 0.74\n }\n },\n \"trafo\": {\n \"0.4 MVA 20/0.4 kV\": {\n \"shift_degree\": 150,\n \"vector_group\": \"Dyn5\",\n \"vn_hv_kv\": 20.0,\n \"pfe_kw\": 1.35,\n \"i0_percent\": 0.3375,\n \"vn_lv_kv\": 0.4,\n \"sn_mva\": 0.4,\n \"tap_step_degree\": 
0,\n \"tap_neutral\": 0,\n \"tap_min\": -2,\n \"vkr_percent\": 1.425,\n \"tap_step_percent\": 2.5,\n \"tap_side\": \"hv\",\n \"tap_phase_shifter\": false,\n \"tap_max\": 2,\n \"vk_percent\": 6.0\n },\n \"63 MVA 110/20 kV v1.4.3 and older\": {\n \"shift_degree\": 150,\n \"vector_group\": \"YNd5\",\n \"vn_hv_kv\": 110.0,\n \"pfe_kw\": 33.0,\n \"i0_percent\": 0.086,\n \"vn_lv_kv\": 20.0,\n \"sn_mva\": 63.0,\n \"tap_step_degree\": 0,\n \"tap_neutral\": 0,\n \"tap_min\": -9,\n \"vkr_percent\": 0.322,\n \"tap_step_percent\": 1.5,\n \"tap_side\": \"hv\",\n \"tap_phase_shifter\": false,\n \"tap_max\": 9,\n \"vk_percent\": 11.2\n },\n \"63 MVA 110/10 kV v1.4.3 and older\": {\n \"shift_degree\": 150,\n \"vector_group\": \"YNd5\",\n \"vn_hv_kv\": 110.0,\n \"pfe_kw\": 31.51,\n \"i0_percent\": 0.078,\n \"vn_lv_kv\": 10.0,\n \"sn_mva\": 63.0,\n \"tap_step_degree\": 0,\n \"tap_neutral\": 0,\n \"tap_min\": -9,\n \"vkr_percent\": 0.31,\n \"tap_step_percent\": 1.5,\n \"tap_side\": \"hv\",\n \"tap_phase_shifter\": false,\n \"tap_max\": 9,\n \"vk_percent\": 10.04\n },\n \"25 MVA 110/20 kV v1.4.3 and older\": {\n \"shift_degree\": 150,\n \"vector_group\": \"YNd5\",\n \"vn_hv_kv\": 110.0,\n \"pfe_kw\": 29.0,\n \"i0_percent\": 0.071,\n \"vn_lv_kv\": 20.0,\n \"sn_mva\": 25.0,\n \"tap_step_degree\": 0,\n \"tap_neutral\": 0,\n \"tap_min\": -9,\n \"vkr_percent\": 0.282,\n \"tap_step_percent\": 1.5,\n \"tap_side\": \"hv\",\n \"tap_phase_shifter\": false,\n \"tap_max\": 9,\n \"vk_percent\": 11.2\n },\n \"40 MVA 110/20 kV v1.4.3 and older\": {\n \"shift_degree\": 150,\n \"vector_group\": \"YNd5\",\n \"vn_hv_kv\": 110.0,\n \"pfe_kw\": 31.0,\n \"i0_percent\": 0.08,\n \"vn_lv_kv\": 20.0,\n \"sn_mva\": 40.0,\n \"tap_step_degree\": 0,\n \"tap_neutral\": 0,\n \"tap_min\": -9,\n \"vkr_percent\": 0.302,\n \"tap_step_percent\": 1.5,\n \"tap_side\": \"hv\",\n \"tap_phase_shifter\": false,\n \"tap_max\": 9,\n \"vk_percent\": 11.2\n },\n \"0.25 MVA 20/0.4 kV\": {\n \"shift_degree\": 150,\n 
\"vector_group\": \"Yzn5\",\n \"vn_hv_kv\": 20.0,\n \"pfe_kw\": 0.8,\n \"i0_percent\": 0.32,\n \"vn_lv_kv\": 0.4,\n \"sn_mva\": 0.25,\n \"tap_step_degree\": 0,\n \"tap_neutral\": 0,\n \"tap_min\": -2,\n \"vkr_percent\": 1.44,\n \"tap_step_percent\": 2.5,\n \"tap_side\": \"hv\",\n \"tap_phase_shifter\": false,\n \"tap_max\": 2,\n \"vk_percent\": 6.0\n },\n \"25 MVA 110/10 kV v1.4.3 and older\": {\n \"shift_degree\": 150,\n \"vector_group\": \"YNd5\",\n \"vn_hv_kv\": 110.0,\n \"pfe_kw\": 28.51,\n \"i0_percent\": 0.073,\n \"vn_lv_kv\": 10.0,\n \"sn_mva\": 25.0,\n \"tap_step_degree\": 0,\n \"tap_neutral\": 0,\n \"tap_min\": -9,\n \"vkr_percent\": 0.276,\n \"tap_step_percent\": 1.5,\n \"tap_side\": \"hv\",\n \"tap_phase_shifter\": false,\n \"tap_max\": 9,\n \"vk_percent\": 10.04\n },\n \"0.25 MVA 10/0.4 kV\": {\n \"shift_degree\": 150,\n \"vector_group\": \"Dyn5\",\n \"vn_hv_kv\": 10.0,\n \"pfe_kw\": 0.6,\n \"i0_percent\": 0.24,\n \"vn_lv_kv\": 0.4,\n \"sn_mva\": 0.25,\n \"tap_step_degree\": 0,\n \"tap_neutral\": 0,\n \"tap_min\": -2,\n \"vkr_percent\": 1.2,\n \"tap_step_percent\": 2.5,\n \"tap_side\": \"hv\",\n \"tap_phase_shifter\": false,\n \"tap_max\": 2,\n \"vk_percent\": 4.0\n },\n \"160 MVA 380/110 kV\": {\n \"shift_degree\": 0,\n \"vector_group\": \"Yy0\",\n \"vn_hv_kv\": 380.0,\n \"pfe_kw\": 60.0,\n \"i0_percent\": 0.06,\n \"vn_lv_kv\": 110.0,\n \"sn_mva\": 160.0,\n \"tap_step_degree\": 0,\n \"tap_neutral\": 0,\n \"tap_min\": -9,\n \"vkr_percent\": 0.25,\n \"tap_step_percent\": 1.5,\n \"tap_side\": \"hv\",\n \"tap_phase_shifter\": false,\n \"tap_max\": 9,\n \"vk_percent\": 12.2\n },\n \"63 MVA 110/10 kV\": {\n \"shift_degree\": 150,\n \"vector_group\": \"YNd5\",\n \"vn_hv_kv\": 110.0,\n \"pfe_kw\": 22.0,\n \"i0_percent\": 0.04,\n \"vn_lv_kv\": 10.0,\n \"sn_mva\": 63.0,\n \"tap_step_degree\": 0,\n \"tap_neutral\": 0,\n \"tap_min\": -9,\n \"vkr_percent\": 0.32,\n \"tap_step_percent\": 1.5,\n \"tap_side\": \"hv\",\n \"tap_phase_shifter\": false,\n \"tap_max\": 
9,\n \"vk_percent\": 18.0\n },\n \"0.63 MVA 20/0.4 kV\": {\n \"shift_degree\": 150,\n \"vector_group\": \"Dyn5\",\n \"vn_hv_kv\": 20.0,\n \"pfe_kw\": 1.65,\n \"i0_percent\": 0.2619,\n \"vn_lv_kv\": 0.4,\n \"sn_mva\": 0.63,\n \"tap_step_degree\": 0,\n \"tap_neutral\": 0,\n \"tap_min\": -2,\n \"vkr_percent\": 1.206,\n \"tap_step_percent\": 2.5,\n \"tap_side\": \"hv\",\n \"tap_phase_shifter\": false,\n \"tap_max\": 2,\n \"vk_percent\": 6.0\n },\n \"0.4 MVA 10/0.4 kV\": {\n \"shift_degree\": 150,\n \"vector_group\": \"Dyn5\",\n \"vn_hv_kv\": 10.0,\n \"pfe_kw\": 0.95,\n \"i0_percent\": 0.2375,\n \"vn_lv_kv\": 0.4,\n \"sn_mva\": 0.4,\n \"tap_step_degree\": 0,\n \"tap_neutral\": 0,\n \"tap_min\": -2,\n \"vkr_percent\": 1.325,\n \"tap_step_percent\": 2.5,\n \"tap_side\": \"hv\",\n \"tap_phase_shifter\": false,\n \"tap_max\": 2,\n \"vk_percent\": 4.0\n },\n \"0.63 MVA 10/0.4 kV\": {\n \"shift_degree\": 150,\n \"vector_group\": \"Dyn5\",\n \"vn_hv_kv\": 10.0,\n \"pfe_kw\": 1.18,\n \"i0_percent\": 0.1873,\n \"vn_lv_kv\": 0.4,\n \"sn_mva\": 0.63,\n \"tap_step_degree\": 0,\n \"tap_neutral\": 0,\n \"tap_min\": -2,\n \"vkr_percent\": 1.0794,\n \"tap_step_percent\": 2.5,\n \"tap_side\": \"hv\",\n \"tap_phase_shifter\": false,\n \"tap_max\": 2,\n \"vk_percent\": 4.0\n },\n \"63 MVA 110/20 kV\": {\n \"shift_degree\": 150,\n \"vector_group\": \"YNd5\",\n \"vn_hv_kv\": 110.0,\n \"pfe_kw\": 22.0,\n \"i0_percent\": 0.04,\n \"vn_lv_kv\": 20.0,\n \"sn_mva\": 63.0,\n \"tap_step_degree\": 0,\n \"tap_neutral\": 0,\n \"tap_min\": -9,\n \"vkr_percent\": 0.32,\n \"tap_step_percent\": 1.5,\n \"tap_side\": \"hv\",\n \"tap_phase_shifter\": false,\n \"tap_max\": 9,\n \"vk_percent\": 18.0\n },\n \"100 MVA 220/110 kV\": {\n \"shift_degree\": 0,\n \"vector_group\": \"Yy0\",\n \"vn_hv_kv\": 220.0,\n \"pfe_kw\": 55.0,\n \"i0_percent\": 0.06,\n \"vn_lv_kv\": 110.0,\n \"sn_mva\": 100.0,\n \"tap_step_degree\": 0,\n \"tap_neutral\": 0,\n \"tap_min\": -9,\n \"vkr_percent\": 0.26,\n \"tap_step_percent\": 
1.5,\n \"tap_side\": \"hv\",\n \"tap_phase_shifter\": false,\n \"tap_max\": 9,\n \"vk_percent\": 12.0\n },\n \"25 MVA 110/10 kV\": {\n \"shift_degree\": 150,\n \"vector_group\": \"YNd5\",\n \"vn_hv_kv\": 110.0,\n \"pfe_kw\": 14.0,\n \"i0_percent\": 0.07,\n \"vn_lv_kv\": 10.0,\n \"sn_mva\": 25.0,\n \"tap_step_degree\": 0,\n \"tap_neutral\": 0,\n \"tap_min\": -9,\n \"vkr_percent\": 0.41,\n \"tap_step_percent\": 1.5,\n \"tap_side\": \"hv\",\n \"tap_phase_shifter\": false,\n \"tap_max\": 9,\n \"vk_percent\": 12.0\n },\n \"40 MVA 110/20 kV\": {\n \"shift_degree\": 150,\n \"vector_group\": \"YNd5\",\n \"vn_hv_kv\": 110.0,\n \"pfe_kw\": 18.0,\n \"i0_percent\": 0.05,\n \"vn_lv_kv\": 20.0,\n \"sn_mva\": 40.0,\n \"tap_step_degree\": 0,\n \"tap_neutral\": 0,\n \"tap_min\": -9,\n \"vkr_percent\": 0.34,\n \"tap_step_percent\": 1.5,\n \"tap_side\": \"hv\",\n \"tap_phase_shifter\": false,\n \"tap_max\": 9,\n \"vk_percent\": 16.2\n },\n \"40 MVA 110/10 kV v1.4.3 and older\": {\n \"shift_degree\": 150,\n \"vector_group\": \"YNd5\",\n \"vn_hv_kv\": 110.0,\n \"pfe_kw\": 30.45,\n \"i0_percent\": 0.076,\n \"vn_lv_kv\": 10.0,\n \"sn_mva\": 40.0,\n \"tap_step_degree\": 0,\n \"tap_neutral\": 0,\n \"tap_min\": -9,\n \"vkr_percent\": 0.295,\n \"tap_step_percent\": 1.5,\n \"tap_side\": \"hv\",\n \"tap_phase_shifter\": false,\n \"tap_max\": 9,\n \"vk_percent\": 10.04\n },\n \"25 MVA 110/20 kV\": {\n \"shift_degree\": 150,\n \"vector_group\": \"YNd5\",\n \"vn_hv_kv\": 110.0,\n \"pfe_kw\": 14.0,\n \"i0_percent\": 0.07,\n \"vn_lv_kv\": 20.0,\n \"sn_mva\": 25.0,\n \"tap_step_degree\": 0,\n \"tap_neutral\": 0,\n \"tap_min\": -9,\n \"vkr_percent\": 0.41,\n \"tap_step_percent\": 1.5,\n \"tap_side\": \"hv\",\n \"tap_phase_shifter\": false,\n \"tap_max\": 9,\n \"vk_percent\": 12.0\n },\n \"40 MVA 110/10 kV\": {\n \"shift_degree\": 150,\n \"vector_group\": \"YNd5\",\n \"vn_hv_kv\": 110.0,\n \"pfe_kw\": 18.0,\n \"i0_percent\": 0.05,\n \"vn_lv_kv\": 10.0,\n \"sn_mva\": 40.0,\n \"tap_step_degree\": 0,\n 
\"tap_neutral\": 0,\n \"tap_min\": -9,\n \"vkr_percent\": 0.34,\n \"tap_step_percent\": 1.5,\n \"tap_side\": \"hv\",\n \"tap_phase_shifter\": false,\n \"tap_max\": 9,\n \"vk_percent\": 16.2\n }\n },\n \"trafo3w\": {\n \"63/25/38 MVA 110/10/10 kV\": {\n \"vector_group\": \"YN0yn0yn0\",\n \"vn_mv_kv\": 10,\n \"vn_lv_kv\": 10,\n \"shift_lv_degree\": 0,\n \"shift_mv_degree\": 0,\n \"pfe_kw\": 35,\n \"vn_hv_kv\": 110,\n \"i0_percent\": 0.89,\n \"sn_lv_mva\": 38.0,\n \"sn_hv_mva\": 63.0,\n \"sn_mv_mva\": 25.0,\n \"vkr_lv_percent\": 0.35,\n \"tap_neutral\": 0,\n \"tap_min\": -10,\n \"vk_mv_percent\": 10.4,\n \"vkr_hv_percent\": 0.28,\n \"vk_lv_percent\": 10.4,\n \"tap_max\": 10,\n \"vkr_mv_percent\": 0.32,\n \"tap_step_percent\": 1.2,\n \"tap_side\": \"hv\",\n \"vk_hv_percent\": 10.4\n },\n \"63/25/38 MVA 110/20/10 kV\": {\n \"vector_group\": \"YN0yn0yn0\",\n \"vn_mv_kv\": 20,\n \"vn_lv_kv\": 10,\n \"shift_lv_degree\": 0,\n \"shift_mv_degree\": 0,\n \"pfe_kw\": 35,\n \"vn_hv_kv\": 110,\n \"i0_percent\": 0.89,\n \"sn_lv_mva\": 38.0,\n \"sn_hv_mva\": 63.0,\n \"sn_mv_mva\": 25.0,\n \"vkr_lv_percent\": 0.35,\n \"tap_neutral\": 0,\n \"tap_min\": -10,\n \"vk_mv_percent\": 10.4,\n \"vkr_hv_percent\": 0.28,\n \"vk_lv_percent\": 10.4,\n \"tap_max\": 10,\n \"vkr_mv_percent\": 0.32,\n \"tap_step_percent\": 1.2,\n \"tap_side\": \"hv\",\n \"vk_hv_percent\": 10.4\n }\n }\n},\"storage\":{\n \"_module\": \"pandas.core.frame\",\n \"_class\": \"DataFrame\",\n \"_object\": \"{\\\"columns\\\":[\\\"name\\\",\\\"bus\\\",\\\"p_mw\\\",\\\"q_mvar\\\",\\\"sn_mva\\\",\\\"soc_percent\\\",\\\"min_e_mwh\\\",\\\"max_e_mwh\\\",\\\"scaling\\\",\\\"in_service\\\",\\\"type\\\"],\\\"index\\\":[],\\\"data\\\":[]}\",\n \"dtype\": {\n \"name\": \"object\",\n \"bus\": \"int64\",\n \"p_mw\": \"float64\",\n \"q_mvar\": \"float64\",\n \"sn_mva\": \"float64\",\n \"soc_percent\": \"float64\",\n \"min_e_mwh\": \"float64\",\n \"max_e_mwh\": \"float64\",\n \"scaling\": \"float64\",\n \"in_service\": \"bool\",\n 
\"type\": \"object\"\n },\n \"orient\": \"split\"\n},\"switch\":{\n \"_module\": \"pandas.core.frame\",\n \"_class\": \"DataFrame\",\n \"_object\": \"{\\\"columns\\\":[\\\"bus\\\",\\\"element\\\",\\\"et\\\",\\\"type\\\",\\\"closed\\\",\\\"name\\\",\\\"z_ohm\\\"],\\\"index\\\":[],\\\"data\\\":[]}\",\n \"dtype\": {\n \"bus\": \"int64\",\n \"element\": \"int64\",\n \"et\": \"object\",\n \"type\": \"object\",\n \"closed\": \"bool\",\n \"name\": \"object\",\n \"z_ohm\": \"float64\"\n },\n \"orient\": \"split\"\n},\"trafo\":{\n \"_module\": \"pandas.core.frame\",\n \"_class\": \"DataFrame\",\n \"_object\": \"{\\\"columns\\\":[\\\"df\\\",\\\"hv_bus\\\",\\\"i0_percent\\\",\\\"in_service\\\",\\\"lv_bus\\\",\\\"max_loading_percent\\\",\\\"name\\\",\\\"parallel\\\",\\\"pfe_kw\\\",\\\"shift_degree\\\",\\\"sn_mva\\\",\\\"std_type\\\",\\\"tap_max\\\",\\\"tap_neutral\\\",\\\"tap_min\\\",\\\"tap_phase_shifter\\\",\\\"tap_pos\\\",\\\"tap_side\\\",\\\"tap_step_degree\\\",\\\"tap_step_percent\\\",\\\"vn_hv_kv\\\",\\\"vn_lv_kv\\\",\\\"vk_percent\\\",\\\"vkr_percent\\\"],\\\"index\\\":[0,1,2,3,4],\\\"data\\\":[[1.0,3,0.0,true,6,100.0,null,1,0.0,0.0,9900.0,null,null,0.0,null,false,-1.0,\\\"hv\\\",null,2.2,135.0,14.0,2070.288000000000011,0.0],[1.0,3,0.0,true,8,100.0,null,1,0.0,0.0,9900.0,null,null,0.0,null,false,-1.0,\\\"hv\\\",null,3.1,135.0,0.208,5506.181999999999789,0.0],[1.0,4,0.0,true,5,100.0,null,1,0.0,0.0,9900.0,null,null,0.0,null,false,-1.0,\\\"hv\\\",null,6.8,135.0,0.208,2494.998000000000047,0.0],[1.0,6,0.0,true,7,100.0,null,1,0.0,0.0,9900.0,null,null,null,null,false,null,null,null,null,14.0,12.0,1743.884999999999991,0.0],[1.0,6,0.0,true,8,100.0,null,1,0.0,0.0,9900.0,null,null,null,null,false,null,null,null,null,14.0,0.208,1089.098999999999933,0.0]]}\",\n \"dtype\": {\n \"df\": \"float64\",\n \"hv_bus\": \"uint32\",\n \"i0_percent\": \"float64\",\n \"in_service\": \"bool\",\n \"lv_bus\": \"uint32\",\n \"max_loading_percent\": \"float64\",\n \"name\": \"object\",\n \"parallel\": 
\"uint32\",\n \"pfe_kw\": \"float64\",\n \"shift_degree\": \"float64\",\n \"sn_mva\": \"float64\",\n \"std_type\": \"object\",\n \"tap_max\": \"float64\",\n \"tap_neutral\": \"float64\",\n \"tap_min\": \"float64\",\n \"tap_phase_shifter\": \"bool\",\n \"tap_pos\": \"float64\",\n \"tap_side\": \"object\",\n \"tap_step_degree\": \"float64\",\n \"tap_step_percent\": \"float64\",\n \"vn_hv_kv\": \"float64\",\n \"vn_lv_kv\": \"float64\",\n \"vk_percent\": \"float64\",\n \"vkr_percent\": \"float64\"\n },\n \"orient\": \"split\"\n},\"trafo3w\":{\n \"_module\": \"pandas.core.frame\",\n \"_class\": \"DataFrame\",\n \"_object\": \"{\\\"columns\\\":[\\\"name\\\",\\\"std_type\\\",\\\"hv_bus\\\",\\\"mv_bus\\\",\\\"lv_bus\\\",\\\"sn_hv_mva\\\",\\\"sn_mv_mva\\\",\\\"sn_lv_mva\\\",\\\"vn_hv_kv\\\",\\\"vn_mv_kv\\\",\\\"vn_lv_kv\\\",\\\"vk_hv_percent\\\",\\\"vk_mv_percent\\\",\\\"vk_lv_percent\\\",\\\"vkr_hv_percent\\\",\\\"vkr_mv_percent\\\",\\\"vkr_lv_percent\\\",\\\"pfe_kw\\\",\\\"i0_percent\\\",\\\"shift_mv_degree\\\",\\\"shift_lv_degree\\\",\\\"tap_side\\\",\\\"tap_neutral\\\",\\\"tap_min\\\",\\\"tap_max\\\",\\\"tap_step_percent\\\",\\\"tap_step_degree\\\",\\\"tap_pos\\\",\\\"tap_at_star_point\\\",\\\"in_service\\\"],\\\"index\\\":[],\\\"data\\\":[]}\",\n \"dtype\": {\n \"name\": \"object\",\n \"std_type\": \"object\",\n \"hv_bus\": \"uint32\",\n \"mv_bus\": \"uint32\",\n \"lv_bus\": \"uint32\",\n \"sn_hv_mva\": \"float64\",\n \"sn_mv_mva\": \"float64\",\n \"sn_lv_mva\": \"float64\",\n \"vn_hv_kv\": \"float64\",\n \"vn_mv_kv\": \"float64\",\n \"vn_lv_kv\": \"float64\",\n \"vk_hv_percent\": \"float64\",\n \"vk_mv_percent\": \"float64\",\n \"vk_lv_percent\": \"float64\",\n \"vkr_hv_percent\": \"float64\",\n \"vkr_mv_percent\": \"float64\",\n \"vkr_lv_percent\": \"float64\",\n \"pfe_kw\": \"float64\",\n \"i0_percent\": \"float64\",\n \"shift_mv_degree\": \"float64\",\n \"shift_lv_degree\": \"float64\",\n \"tap_side\": \"object\",\n \"tap_neutral\": \"int32\",\n \"tap_min\": 
\"int32\",\n \"tap_max\": \"int32\",\n \"tap_step_percent\": \"float64\",\n \"tap_step_degree\": \"float64\",\n \"tap_pos\": \"int32\",\n \"tap_at_star_point\": \"bool\",\n \"in_service\": \"bool\"\n },\n \"orient\": \"split\"\n},\"user_pf_options\":{},\"version\":\"2.0.1\",\"ward\":{\n \"_module\": \"pandas.core.frame\",\n \"_class\": \"DataFrame\",\n \"_object\": \"{\\\"columns\\\":[\\\"name\\\",\\\"bus\\\",\\\"ps_mw\\\",\\\"qs_mvar\\\",\\\"qz_mvar\\\",\\\"pz_mw\\\",\\\"in_service\\\"],\\\"index\\\":[],\\\"data\\\":[]}\",\n \"dtype\": {\n \"name\": \"object\",\n \"bus\": \"uint32\",\n \"ps_mw\": \"float64\",\n \"qs_mvar\": \"float64\",\n \"qz_mvar\": \"float64\",\n \"pz_mw\": \"float64\",\n \"in_service\": \"bool\"\n },\n \"orient\": \"split\"\n},\"xward\":{\n \"_module\": \"pandas.core.frame\",\n \"_class\": \"DataFrame\",\n \"_object\": \"{\\\"columns\\\":[\\\"name\\\",\\\"bus\\\",\\\"ps_mw\\\",\\\"qs_mvar\\\",\\\"qz_mvar\\\",\\\"pz_mw\\\",\\\"r_ohm\\\",\\\"x_ohm\\\",\\\"vm_pu\\\",\\\"in_service\\\"],\\\"index\\\":[],\\\"data\\\":[]}\",\n \"dtype\": {\n \"name\": \"object\",\n \"bus\": \"uint32\",\n \"ps_mw\": \"float64\",\n \"qs_mvar\": \"float64\",\n \"qz_mvar\": \"float64\",\n \"pz_mw\": \"float64\",\n \"r_ohm\": \"float64\",\n \"x_ohm\": \"float64\",\n \"vm_pu\": \"float64\",\n \"in_service\": \"bool\"\n },\n \"orient\": \"split\"\n}}\n" +} diff --git a/grid2op/data_test/multimix/case14_002/prods_charac.csv b/grid2op/data_test/multimix/case14_002/prods_charac.csv index 21c8943e5..f27dff8db 100644 --- a/grid2op/data_test/multimix/case14_002/prods_charac.csv +++ b/grid2op/data_test/multimix/case14_002/prods_charac.csv @@ -1,6 +1,6 @@ Pmax,Pmin,name,type,bus,max_ramp_up,max_ramp_down,min_up_time,min_down_time,marginal_cost,shut_down_cost,start_cost,x,y,V 150,0.0,gen_1_0,nuclear,1,5,5,96,96,40,10,20,180,10,142.1 200,0.0,gen_2_1,thermal,2,10,10,4,4,70,1,2,646,10,142.1 -70,0.0,gen_5_2,wind,5,0,0,0,0,0,0,0,216,334,22.0 
-50,0.0,gen_7_3,solar,7,0,0,0,0,0,0,0,718,280,13.2 +70,0.0,gen_5_2,wind,5,0,0,0,0,0,0,0,216,334,0.208 +50,0.0,gen_7_3,solar,7,0,0,0,0,0,0,0,718,280,12.0 300,0.0,gen_0_4,thermal,0,10,10,4,4,70,1,2,0,199,142.1 \ No newline at end of file diff --git a/grid2op/gym_compat/box_gym_obsspace.py b/grid2op/gym_compat/box_gym_obsspace.py index 76879ef9e..eefe71893 100644 --- a/grid2op/gym_compat/box_gym_obsspace.py +++ b/grid2op/gym_compat/box_gym_obsspace.py @@ -215,9 +215,11 @@ def __init__( ob_sp = grid2op_observation_space ob_sp_cls = type(grid2op_observation_space) - tol_redisp = ( - ob_sp.obs_env._tol_poly - ) # add to gen_p otherwise ... well it can crash + # add to gen_p otherwise ... well it can crash + if ob_sp.obs_env is not None: + tol_redisp = ob_sp.obs_env._tol_poly + else: + tol_redisp = 1e-2 extra_for_losses = _compute_extra_power_for_losses(ob_sp) self._dict_properties = { diff --git a/grid2op/gym_compat/gym_act_space.py b/grid2op/gym_compat/gym_act_space.py index 94bf2ff0f..984de4127 100644 --- a/grid2op/gym_compat/gym_act_space.py +++ b/grid2op/gym_compat/gym_act_space.py @@ -126,24 +126,35 @@ def __init__(self, env, converter=None, dict_variables=None): env, (Environment, MultiMixEnvironment, BaseMultiProcessEnvironment) ): # action_space is an environment - self.initial_act_space = env.action_space - self._init_env = env + # self.initial_act_space = env.action_space + # self._init_env = env + self._template_act = env.action_space() + self._converter = None + self.__is_converter = False elif isinstance(env, ActionSpace) and converter is None: warnings.warn( "It is now deprecated to initialize an Converter with an " "action space. Please use an environment instead." 
) - self.initial_act_space = env - self._init_env = None + self._converter = None + self._template_act = env() + self.__is_converter = False + elif isinstance(env, type(self)): + self._template_act = env._template_act.copy() + self._converter = env._converter + self.__is_converter = env.__is_converter else: raise RuntimeError( "GymActionSpace must be created with an Environment or an ActionSpace (or a Converter)" ) dict_ = {} + # TODO Make sure it works well ! if converter is not None and isinstance(converter, Converter): # a converter allows to ... convert the data so they have specific gym space - self.initial_act_space = converter + # self.initial_act_space = converter + self._converter = converter + self._template_act = converter.init_action_space() dict_ = converter.get_gym_dict(type(self)) self.__is_converter = True elif converter is not None: @@ -155,7 +166,7 @@ def __init__(self, env, converter=None, dict_variables=None): ) else: self._fill_dict_act_space( - dict_, self.initial_act_space, dict_variables=dict_variables + dict_, dict_variables=dict_variables ) dict_ = self._fix_dict_keys(dict_) self.__is_converter = False @@ -194,11 +205,11 @@ def reencode_space(self, key, fun): If an attribute has been ignored, for example by :func`GymEnv.keep_only_obs_attr` or and is now present here, it will be re added in the final observation """ - if self._init_env is None: - raise RuntimeError( - "Impossible to reencode a space that has been initialized with an " - "action space as input. Please provide a valid" - ) + # if self._init_env is None: + # raise RuntimeError( + # "Impossible to reencode a space that has been initialized with an " + # "action space as input. Please provide a valid" + # ) if self.__is_converter: raise RuntimeError( "Impossible to reencode a space that is a converter space." 
@@ -224,13 +235,15 @@ def reencode_space(self, key, fun): else: raise RuntimeError(f"Impossible to find key {key} in your action space") my_dict[key2] = fun - res = type(self)(env=self._init_env, dict_variables=my_dict) + res = type(self)(env=self, dict_variables=my_dict) return res - def _fill_dict_act_space(self, dict_, action_space, dict_variables): + def _fill_dict_act_space(self, dict_, dict_variables): # TODO what about dict_variables !!! for attr_nm, sh, dt in zip( - action_space.attr_list_vect, action_space.shape, action_space.dtype + type(self._template_act).attr_list_vect, + self._template_act.shapes(), + self._template_act.dtypes() ): if sh == 0: # do not add "empty" (=0 dimension) arrays to gym otherwise it crashes @@ -249,7 +262,7 @@ def _fill_dict_act_space(self, dict_, action_space, dict_variables): my_type = type(self)._BoxType(low=-1, high=1, shape=shape, dtype=dt) elif attr_nm == "_set_topo_vect": my_type = type(self)._BoxType(low=-1, - high=type(action_space).n_busbar_per_sub, + high=type(self._template_act).n_busbar_per_sub, shape=shape, dtype=dt) elif dt == dt_bool: # boolean observation space @@ -263,28 +276,28 @@ def _fill_dict_act_space(self, dict_, action_space, dict_variables): SpaceType = type(self)._BoxType if attr_nm == "prod_p": - low = action_space.gen_pmin - high = action_space.gen_pmax + low = type(self._template_act).gen_pmin + high = type(self._template_act).gen_pmax shape = None elif attr_nm == "prod_v": # voltages can't be negative low = 0.0 elif attr_nm == "_redispatch": # redispatch - low = -1.0 * action_space.gen_max_ramp_down - high = 1.0 * action_space.gen_max_ramp_up - low[~action_space.gen_redispatchable] = 0.0 - high[~action_space.gen_redispatchable] = 0.0 + low = -1.0 * type(self._template_act).gen_max_ramp_down + high = 1.0 * type(self._template_act).gen_max_ramp_up + low[~type(self._template_act).gen_redispatchable] = 0.0 + high[~type(self._template_act).gen_redispatchable] = 0.0 elif attr_nm == "_curtail": # 
curtailment - low = np.zeros(action_space.n_gen, dtype=dt_float) - high = np.ones(action_space.n_gen, dtype=dt_float) - low[~action_space.gen_renewable] = -1.0 - high[~action_space.gen_renewable] = -1.0 + low = np.zeros(type(self._template_act).n_gen, dtype=dt_float) + high = np.ones(type(self._template_act).n_gen, dtype=dt_float) + low[~type(self._template_act).gen_renewable] = -1.0 + high[~type(self._template_act).gen_renewable] = -1.0 elif attr_nm == "_storage_power": # storage power - low = -1.0 * action_space.storage_max_p_prod - high = 1.0 * action_space.storage_max_p_absorb + low = -1.0 * type(self._template_act).storage_max_p_prod + high = 1.0 * type(self._template_act).storage_max_p_absorb my_type = SpaceType(low=low, high=high, shape=shape, dtype=dt) if my_type is None: @@ -317,10 +330,10 @@ def from_gym(self, gymlike_action: OrderedDict) -> object: if self.__is_converter: # case where the action space comes from a converter, in this case the converter takes the # delegation to convert the action to openai gym - res = self.initial_act_space.convert_action_from_gym(gymlike_action) + res = self._converter.convert_action_from_gym(gymlike_action) else: # case where the action space is a "simple" action space - res = self.initial_act_space() + res = self._template_act.copy() for k, v in gymlike_action.items(): internal_k = self.keys_human_2_grid2op[k] if internal_k in self._keys_encoding: @@ -347,7 +360,7 @@ def to_gym(self, action: object) -> OrderedDict: """ if self.__is_converter: - gym_action = self.initial_act_space.convert_action_to_gym(action) + gym_action = self._converter.convert_action_to_gym(action) else: # in that case action should be an instance of grid2op BaseAction assert isinstance( diff --git a/grid2op/gym_compat/gym_obs_space.py b/grid2op/gym_compat/gym_obs_space.py index f74b3e43a..170435d05 100644 --- a/grid2op/gym_compat/gym_obs_space.py +++ b/grid2op/gym_compat/gym_obs_space.py @@ -85,17 +85,46 @@ class __AuxGymObservationSpace: 
`env.gen_pmin` and `env.gen_pmax` are not always ensured in grid2op) """ - + ALLOWED_ENV_CLS = (Environment, MultiMixEnvironment, BaseMultiProcessEnvironment) def __init__(self, env, dict_variables=None): if not isinstance( - env, (Environment, MultiMixEnvironment, BaseMultiProcessEnvironment) + env, type(self).ALLOWED_ENV_CLS + (type(self), ) ): raise RuntimeError( "GymActionSpace must be created with an Environment of an ActionSpace (or a Converter)" ) - self._init_env = env - self.initial_obs_space = self._init_env.observation_space + # self._init_env = env + if isinstance(env, type(self).ALLOWED_ENV_CLS): + init_env_cls = type(env) + if init_env_cls._CLS_DICT_EXTENDED is None: + # make sure the _CLS_DICT_EXTENDED exists + tmp_ = {} + init_env_cls._make_cls_dict_extended(init_env_cls, res=tmp_, as_list=False, copy_=False, _topo_vect_only=False) + self.init_env_cls_dict = init_env_cls._CLS_DICT_EXTENDED.copy() + # retrieve an empty observation an disable the forecast feature + self.initial_obs = env.observation_space.get_empty_observation() + self.initial_obs._obs_env = None + self.initial_obs._ptr_kwargs_env = None + + if env.observation_space.obs_env is not None: + self._tol_poly = env.observation_space.obs_env._tol_poly + else: + self._tol_poly = 1e-2 + self._env_params = env.parameters + self._opp_attack_max_duration = env._oppSpace.attack_max_duration + elif isinstance(env, type(self)): + self.init_env_cls_dict = env.init_env_cls_dict.copy() + + # retrieve an empty observation an disable the forecast feature + self.initial_obs = env.initial_obs + + self._tol_poly = env._tol_poly + self._env_params = env._env_params + self._opp_attack_max_duration = env._opp_attack_max_duration + else: + raise RuntimeError("Unknown way to build a gym observation space") + dict_ = {} # will represent the gym.Dict space if dict_variables is None: @@ -105,48 +134,48 @@ def __init__(self, env, dict_variables=None): type(self)._BoxType( low=0., high=np.inf, - 
shape=(self._init_env.n_line, ), + shape=(self.init_env_cls_dict["n_line"], ), dtype=dt_float, ), "theta_or": type(self)._BoxType( low=-180., high=180., - shape=(self._init_env.n_line, ), + shape=(self.init_env_cls_dict["n_line"], ), dtype=dt_float, ), "theta_ex": type(self)._BoxType( low=-180., high=180., - shape=(self._init_env.n_line, ), + shape=(self.init_env_cls_dict["n_line"], ), dtype=dt_float, ), "load_theta": type(self)._BoxType( low=-180., high=180., - shape=(self._init_env.n_load, ), + shape=(self.init_env_cls_dict["n_load"], ), dtype=dt_float, ), "gen_theta": type(self)._BoxType( low=-180., high=180., - shape=(self._init_env.n_gen, ), + shape=(self.init_env_cls_dict["n_gen"], ), dtype=dt_float, ) } - if self._init_env.n_storage: + if self.init_env_cls_dict["n_storage"]: dict_variables["storage_theta"] = type(self)._BoxType( low=-180., high=180., - shape=(self._init_env.n_storage, ), + shape=(self.init_env_cls_dict["n_storage"], ), dtype=dt_float, ) self._fill_dict_obs_space( - dict_, env.observation_space, env.parameters, env._oppSpace, dict_variables + dict_, dict_variables ) super().__init__(dict_, dict_variables=dict_variables) # super should point to _BaseGymSpaceConverter @@ -202,11 +231,11 @@ def reencode_space(self, key, fun): f"Impossible to find key {key} in your observation space" ) my_dict[key] = fun - res = type(self)(self._init_env, my_dict) + res = type(self)(self, my_dict) return res def _fill_dict_obs_space( - self, dict_, observation_space, env_params, opponent_space, dict_variables={} + self, dict_, dict_variables={} ): for attr_nm in dict_variables: # case where the user specified a dedicated encoding @@ -214,17 +243,17 @@ def _fill_dict_obs_space( # none is by default to disable this feature continue if isinstance(dict_variables[attr_nm], type(self)._SpaceType): - if hasattr(observation_space._template_obj, attr_nm): + if hasattr(self.initial_obs, attr_nm): # add it only if attribute exists in the observation dict_[attr_nm] = 
dict_variables[attr_nm] else: dict_[attr_nm] = dict_variables[attr_nm].my_space - + # by default consider all attributes that are vectorized for attr_nm, sh, dt in zip( - observation_space.attr_list_vect, - observation_space.shape, - observation_space.dtype, + type(self.initial_obs).attr_list_vect, + self.initial_obs.shapes(), + self.initial_obs.dtypes(), ): if sh == 0: # do not add "empty" (=0 dimension) arrays to gym otherwise it crashes @@ -253,15 +282,15 @@ def _fill_dict_obs_space( my_type = type(self)._DiscreteType(n=8) elif attr_nm == "topo_vect": my_type = type(self)._BoxType(low=-1, - high=observation_space.n_busbar_per_sub, + high=self.init_env_cls_dict["n_busbar_per_sub"], shape=shape, dtype=dt) elif attr_nm == "time_before_cooldown_line": my_type = type(self)._BoxType( low=0, high=max( - env_params.NB_TIMESTEP_COOLDOWN_LINE, - env_params.NB_TIMESTEP_RECONNECTION, - opponent_space.attack_max_duration, + self._env_params.NB_TIMESTEP_COOLDOWN_LINE, + self._env_params.NB_TIMESTEP_RECONNECTION, + self._opp_attack_max_duration, ), shape=shape, dtype=dt, @@ -269,7 +298,7 @@ def _fill_dict_obs_space( elif attr_nm == "time_before_cooldown_sub": my_type = type(self)._BoxType( low=0, - high=env_params.NB_TIMESTEP_COOLDOWN_SUB, + high=self._env_params.NB_TIMESTEP_COOLDOWN_SUB, shape=shape, dtype=dt, ) @@ -314,17 +343,17 @@ def _fill_dict_obs_space( shape = (sh,) SpaceType = type(self)._BoxType if attr_nm == "gen_p" or attr_nm == "gen_p_before_curtail": - low = copy.deepcopy(observation_space.gen_pmin) - high = copy.deepcopy(observation_space.gen_pmax) + low = copy.deepcopy(self.init_env_cls_dict["gen_pmin"]) + high = copy.deepcopy(self.init_env_cls_dict["gen_pmax"]) shape = None # for redispatching - low -= observation_space.obs_env._tol_poly - high += observation_space.obs_env._tol_poly + low -= self._tol_poly + high += self._tol_poly # for "power losses" that are not properly computed in the original data extra_for_losses = _compute_extra_power_for_losses( - 
observation_space + self.init_env_cls_dict ) low -= extra_for_losses high += extra_for_losses @@ -343,17 +372,17 @@ def _fill_dict_obs_space( elif attr_nm == "target_dispatch" or attr_nm == "actual_dispatch": # TODO check that to be sure low = np.minimum( - observation_space.gen_pmin, -observation_space.gen_pmax + self.init_env_cls_dict["gen_pmin"], -self.init_env_cls_dict["gen_pmax"] ) high = np.maximum( - -observation_space.gen_pmin, +observation_space.gen_pmax + -self.init_env_cls_dict["gen_pmin"], +self.init_env_cls_dict["gen_pmax"] ) elif attr_nm == "storage_power" or attr_nm == "storage_power_target": - low = -observation_space.storage_max_p_prod - high = observation_space.storage_max_p_absorb + low = -self.init_env_cls_dict["storage_max_p_prod"] + high = self.init_env_cls_dict["storage_max_p_absorb"] elif attr_nm == "storage_charge": - low = np.zeros(observation_space.n_storage, dtype=dt_float) - high = observation_space.storage_Emax + low = np.zeros(self.init_env_cls_dict["n_storage"], dtype=dt_float) + high = self.init_env_cls_dict["storage_Emax"] elif ( attr_nm == "curtailment" or attr_nm == "curtailment_limit" @@ -369,10 +398,10 @@ def _fill_dict_obs_space( high = np.inf elif attr_nm == "gen_margin_up": low = 0.0 - high = observation_space.gen_max_ramp_up + high = self.init_env_cls_dict["gen_max_ramp_up"] elif attr_nm == "gen_margin_down": low = 0.0 - high = observation_space.gen_max_ramp_down + high = self.init_env_cls_dict["gen_max_ramp_down"] # curtailment, curtailment_limit, gen_p_before_curtail my_type = SpaceType(low=low, high=high, shape=shape, dtype=dt) @@ -396,7 +425,7 @@ def from_gym(self, gymlike_observation: spaces.dict.OrderedDict) -> BaseObservat grid2oplike_observation: :class:`grid2op.Observation.BaseObservation` The corresponding grid2op observation """ - res = self.initial_obs_space.get_empty_observation() + res = self.initial_obs.copy() for k, v in gymlike_observation.items(): try: res._assign_attr_from_name(k, v) diff --git 
a/grid2op/gym_compat/gymenv.py b/grid2op/gym_compat/gymenv.py index 15446e6b8..0584ff2ae 100644 --- a/grid2op/gym_compat/gymenv.py +++ b/grid2op/gym_compat/gymenv.py @@ -107,16 +107,28 @@ class behave differently depending on the version of gym you have installed ! def __init__(self, env_init: Environment, shuffle_chronics:Optional[bool]=True, - render_mode: Literal["rgb_array"]="rgb_array"): + render_mode: Literal["rgb_array"]="rgb_array", + with_forecast: bool=False): cls = type(self) check_gym_version(cls._gymnasium) + self.action_space = cls._ActionSpaceType(env_init) + self.observation_space = cls._ObservationSpaceType(env_init) + self.reward_range = env_init.reward_range + self.metadata = env_init.metadata self.init_env = env_init.copy() - self.action_space = cls._ActionSpaceType(self.init_env) - self.observation_space = cls._ObservationSpaceType(self.init_env) - self.reward_range = self.init_env.reward_range - self.metadata = self.init_env.metadata self.init_env.render_mode = render_mode self._shuffle_chronics = shuffle_chronics + if not with_forecast: + # default in grid2op 1.10.3 + # to improve pickle compatibility and speed + self.init_env.deactivate_forecast() + self.init_env._observation_space.obs_env.close() + self.init_env._observation_space.obs_env = None + self.init_env._observation_space._ObsEnv_class = None + self.init_env._last_obs._obs_env = None + self.init_env._last_obs._ptr_kwargs_env = False + self.init_env.current_obs._obs_env = None + self.init_env.current_obs._ptr_kwargs_env = False super().__init__() # super should reference either gym.Env or gymnasium.Env if not hasattr(self, "_np_random"): @@ -219,11 +231,11 @@ def _aux_seed_spaces(self): self.observation_space.seed(next_seed) def _aux_seed_g2op(self, seed): - # then seed the underlying grid2op env - max_ = np.iinfo(dt_int).max - next_seed = sample_seed(max_, self._np_random) - underlying_env_seeds = self.init_env.seed(next_seed) - return seed, next_seed, underlying_env_seeds + # then 
seed the underlying grid2op env + max_ = np.iinfo(dt_int).max + next_seed = sample_seed(max_, self._np_random) + underlying_env_seeds = self.init_env.seed(next_seed) + return seed, next_seed, underlying_env_seeds def _aux_seed(self, seed: Optional[int]=None): # deprecated in gym >=0.26 diff --git a/grid2op/gym_compat/utils.py b/grid2op/gym_compat/utils.py index 030fa89bb..4374ae4a1 100644 --- a/grid2op/gym_compat/utils.py +++ b/grid2op/gym_compat/utils.py @@ -104,7 +104,8 @@ def _compute_extra_power_for_losses(gridobj): to handle the "because of the power losses gen_pmin and gen_pmax can be slightly altered" """ import numpy as np - + if isinstance(gridobj, dict): + return 0.3*np.abs(gridobj["gen_pmax"]).sum() return 0.3 * np.abs(gridobj.gen_pmax).sum() diff --git a/grid2op/tests/BaseBackendTest.py b/grid2op/tests/BaseBackendTest.py index 10c8b7e87..b75f32e35 100644 --- a/grid2op/tests/BaseBackendTest.py +++ b/grid2op/tests/BaseBackendTest.py @@ -2190,11 +2190,13 @@ def tearDown(self): def test_reset_equals_reset(self): self.skip_if_needed() - # Reset backend1 with reset - self.env1.reset() - # Reset backend2 with reset - self.env2.reset() - self._compare_backends() + with warnings.catch_warnings(): + warnings.filterwarnings("error") + # Reset backend1 with reset + self.env1.reset() + # Reset backend2 with reset + self.env2.reset() + self._compare_backends() def _compare_backends(self): # Compare diff --git a/grid2op/tests/_aux_test_some_gym_issues.py b/grid2op/tests/_aux_test_some_gym_issues.py index 5534865f4..c1c065da3 100644 --- a/grid2op/tests/_aux_test_some_gym_issues.py +++ b/grid2op/tests/_aux_test_some_gym_issues.py @@ -19,7 +19,7 @@ from test_issue_379 import Issue379Tester from test_issue_407 import Issue407Tester from test_issue_418 import Issue418Tester -from test_gym_compat import (TestGymCompatModule, +from test_defaultgym_compat import (TestGymCompatModule, TestBoxGymObsSpace, TestBoxGymActSpace, TestMultiDiscreteGymActSpace, @@ -38,6 +38,15 @@ ) 
from test_timeOutEnvironment import TestTOEnvGym from test_pickling import TestMultiProc +from test_alert_gym_compat import * +from test_basic_env_ls import TestBasicEnvironmentGym +from test_gym_asynch_env import * +from test_l2rpn_idf_2023 import TestL2RPNIDF2023Tester +from test_MaskedEnvironment import TestMaskedEnvironmentGym +from test_multidiscrete_act_space import * +from test_n_busbar_per_sub import TestGym_3busbars, TestGym_1busbar +from test_timeOutEnvironment import TestTOEnvGym + if __name__ == "__main__": unittest.main() diff --git a/grid2op/tests/automatic_classes.py b/grid2op/tests/automatic_classes.py new file mode 100644 index 000000000..57306c486 --- /dev/null +++ b/grid2op/tests/automatic_classes.py @@ -0,0 +1,799 @@ +# Copyright (c) 2024, RTE (https://www.rte-france.com) +# See AUTHORS.txt +# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0. +# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file, +# you can obtain one at http://mozilla.org/MPL/2.0/. +# SPDX-License-Identifier: MPL-2.0 +# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems. 
+ +import os +import multiprocessing as mp +from typing import Optional +import warnings +import unittest +import importlib +import numpy as np +from gymnasium.vector import AsyncVectorEnv + + +import grid2op +from grid2op._glop_platform_info import _IS_WINDOWS +from grid2op.Runner import Runner +from grid2op.Agent import BaseAgent +from grid2op.Action import BaseAction +from grid2op.Observation.baseObservation import BaseObservation +from grid2op.Action.actionSpace import ActionSpace +from grid2op.Environment import (Environment, + MaskedEnvironment, + TimedOutEnvironment, + SingleEnvMultiProcess, + MultiMixEnvironment) +from grid2op.Exceptions import NoForecastAvailable +from grid2op.gym_compat import (GymEnv, + BoxGymActSpace, + BoxGymObsSpace, + DiscreteActSpace, + MultiDiscreteActSpace) + +# TODO test the runner saved classes and reload + +# TODO two envs same name => now diff classes + +# TODO test add_to_name +# TODO test noshunt +# TODO grid2op compat version + +# TODO test backend converters +# TODO test all type of backend in the observation space, including the deactivate forecast, reactivate forecast, the different backend etc. 
+ +class _ThisAgentTest(BaseAgent): + def __init__(self, + action_space: ActionSpace, + _read_from_local_dir, + _name_cls_obs, + _name_cls_act, + ): + super().__init__(action_space) + self._read_from_local_dir = _read_from_local_dir + self._name_cls_obs = _name_cls_obs + self._name_cls_act = _name_cls_act + + def act(self, observation: BaseObservation, reward: float, done: bool = False) -> BaseAction: + supermodule_nm, module_nm = os.path.split(self._read_from_local_dir) + super_module = importlib.import_module(module_nm, supermodule_nm) + + # check observation + this_module = importlib.import_module(f"{module_nm}.{self._name_cls_obs}_file", super_module) + if hasattr(this_module, self._name_cls_obs): + this_class_obs = getattr(this_module, self._name_cls_obs) + else: + raise RuntimeError(f"class {self._name_cls_obs} not found") + assert isinstance(observation, this_class_obs) + + # check action + this_module = importlib.import_module(f"{module_nm}.{self._name_cls_act}_file", super_module) + if hasattr(this_module, self._name_cls_act): + this_class_act = getattr(this_module, self._name_cls_act) + else: + raise RuntimeError(f"class {self._name_cls_act} not found") + res = super().act(observation, reward, done) + assert isinstance(res, this_class_act) + return res + + +class AutoClassMakeTester(unittest.TestCase): + """test that the kwargs `class_in_file=False` erase the default behaviour """ + def test_in_make(self): + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + env = grid2op.make("l2rpn_case14_sandbox", test=True, class_in_file=False) + assert env._read_from_local_dir is None + assert not env.classes_are_in_files() + + +class AutoClassInFileTester(unittest.TestCase): + def get_env_name(self): + return "l2rpn_case14_sandbox" + + def setUp(self) -> None: + self.max_iter = 10 + return super().setUp() + + def _do_test_runner(self): + # false for multi process env + return True + + def _do_test_copy(self): + # for for multi process env + return 
True + + def _do_test_obs_env(self): + return True + + def _aux_make_env(self, env: Optional[Environment]=None): + if env is None: + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + env = grid2op.make(self.get_env_name(), test=True, class_in_file=True) + assert env.classes_are_in_files() + return env + + def _aux_get_obs_cls(self): + return "CompleteObservation_{}" + + def _aux_get_act_cls(self): + return "PlayableAction_{}" + + def test_all_classes_from_file(self, + env: Optional[Environment]=None, + classes_name=None, + name_complete_obs_cls="CompleteObservation_{}", + name_observation_cls=None, + name_action_cls=None): + if classes_name is None: + classes_name = self.get_env_name() + if name_observation_cls is None: + name_observation_cls = self._aux_get_obs_cls().format(classes_name) + if name_action_cls is None: + name_action_cls = self._aux_get_act_cls().format(classes_name) + + name_action_cls = name_action_cls.format(classes_name) + env = self._aux_make_env(env) + names_cls = [f"ActionSpace_{classes_name}", + f"_BackendAction_{classes_name}", + f"CompleteAction_{classes_name}", + name_observation_cls.format(classes_name), + name_complete_obs_cls.format(classes_name), + f"DontAct_{classes_name}", + f"_ObsEnv_{classes_name}", + f"ObservationSpace_{classes_name}", + f"PandaPowerBackend_{classes_name}", + name_action_cls, + f"VoltageOnlyAction_{classes_name}" + ] + names_attr = ["action_space", + "_backend_action_class", + "_complete_action_cls", + "_observationClass", + None, # Complete Observation in the forecast ! + None, # DONT ACT not int ENV directly + None, # ObsEnv NOT IN ENV, + "observation_space", + "backend", + "_actionClass", + None, # VoltageOnlyAction not in env + ] + + # NB: these imports needs to be consistent with what is done in + # base_env.generate_classes() and gridobj.init_grid(...) 
+ supermodule_nm, module_nm = os.path.split(env._read_from_local_dir) + super_module = importlib.import_module(module_nm, supermodule_nm) + for name_cls, name_attr in zip(names_cls, names_attr): + this_module = importlib.import_module(f"{module_nm}.{name_cls}_file", super_module) + if hasattr(this_module, name_cls): + this_class = getattr(this_module, name_cls) + else: + raise RuntimeError(f"class {name_cls} not found") + if name_attr is not None: + the_attr = getattr(env, name_attr) + if isinstance(the_attr, type): + assert the_attr is this_class, f"error for {the_attr} vs {this_class} env.{name_attr}" + else: + assert type(the_attr) is this_class, f"error for {type(the_attr)} vs {this_class} (env.{name_attr})" + assert this_class._CLS_DICT is not None, f'error for {name_cls}' + assert this_class._CLS_DICT_EXTENDED is not None, f'error for {name_cls}' + + # additional check for some attributes + if name_cls == f"ActionSpace_{classes_name}": + assert type(env._helper_action_env) is this_class + if env.observation_space.obs_env is not None: + # not in _ObsEnv + assert type(env.observation_space.obs_env._helper_action_env) is this_class, f"{type(env.observation_space.obs_env._helper_action_env)}" + if env._voltage_controler is not None: + # not in _ObsEnv + assert type(env._voltage_controler.action_space) is this_class + if env.chronics_handler.action_space is not None: + # not in _ObsEnv + assert type(env.chronics_handler.action_space) is this_class + assert env.chronics_handler.action_space is env._helper_action_env + elif name_cls == f"_BackendAction_{classes_name}": + assert env.backend.my_bk_act_class is this_class + assert isinstance(env._backend_action, this_class) + if env.observation_space.obs_env is not None: + # not in _ObsEnv + assert env.observation_space.obs_env._backend_action_class is this_class + assert env.observation_space.obs_env.backend.my_bk_act_class is this_class + assert isinstance(env.observation_space.obs_env._backend_action, this_class) + 
elif name_cls == f"CompleteAction_{classes_name}": + assert env.backend._complete_action_class is this_class + + if env.observation_space.obs_env is not None: + # not in _ObsEnv + assert env.observation_space.obs_env._complete_action_cls is this_class + assert env.observation_space.obs_env.backend._complete_action_class is this_class + + assert env.observation_space.obs_env._actionClass is this_class + + assert env._helper_action_env.subtype is this_class + elif name_cls == name_observation_cls.format(classes_name): + # observation of the env + assert env._observation_space.subtype is this_class + if env.current_obs is not None: + # not in _ObsEnv + assert isinstance(env.current_obs, this_class) + if env._last_obs is not None: + # not in _ObsEnv + assert isinstance(env._last_obs, this_class) + elif name_cls == name_observation_cls.format(classes_name): + # observation of the forecast + if env.observation_space.obs_env is not None: + # not in _ObsEnv + assert env._observation_space.obs_env._observation_space.subtype is this_class + if env.observation_space.obs_env.current_obs is not None: + # not in _ObsEnv + assert isinstance(env.observation_space.obs_env.current_obs, this_class) + if env.observation_space.obs_env._last_obs is not None: + # not in _ObsEnv + assert isinstance(env.observation_space.obs_env._last_obs, this_class) + elif name_cls == f"DontAct_{classes_name}": + assert env._oppSpace.action_space.subtype is this_class + assert env._opponent.action_space.subtype is this_class + elif name_cls == f"_ObsEnv_{classes_name}": + if env.observation_space.obs_env is not None: + # not in _ObsEnv + assert type(env.observation_space.obs_env) is this_class + assert isinstance(env.observation_space.obs_env, this_class) + if env.current_obs is not None and env.current_obs._obs_env is not None: + # not in _ObsEnv + assert type(env.current_obs._obs_env) is this_class, f"{type(env.current_obs._obs_env)}" + assert isinstance(env.observation_space.obs_env, this_class) + if 
env._last_obs is not None and env._last_obs._obs_env is not None: + # not in _ObsEnv + assert type(env._last_obs._obs_env) is this_class, f"{type(env._last_obs._obs_env)}" + if env.observation_space.obs_env is not None: + # not in _ObsEnv + assert env.current_obs._obs_env is env.observation_space.obs_env + assert env._last_obs._obs_env is env.observation_space.obs_env + elif name_cls == f"ObservationSpace_{classes_name}": + if env.observation_space.obs_env is not None: + # not in _ObsEnv + assert type(env.observation_space.obs_env._observation_space) is this_class + assert type(env.observation_space.obs_env._ptr_orig_obs_space) is this_class, f"{type(env.observation_space.obs_env._ptr_orig_obs_space)}" + + assert env.observation_space.obs_env._ptr_orig_obs_space is env._observation_space, f"{type(env.observation_space.obs_env._ptr_orig_obs_space)}" + elif name_cls == name_action_cls: + assert env._action_space.subtype is this_class + # assert env.observation_space.obs_env._actionClass is this_class # not it's a complete action apparently + elif name_cls == f"VoltageOnlyAction_{classes_name}": + if env._voltage_controler is not None: + # not in _ObsEnv + assert env._voltage_controler.action_space.subtype is this_class + # TODO test current_obs and _last_obs + + def test_all_classes_from_file_env_after_reset(self, env: Optional[Environment]=None): + """test classes are still consistent even after a call to env.reset() and obs.simulate()""" + env = self._aux_make_env(env) + obs = env.reset() + self.test_all_classes_from_file(env=env) + try: + obs.simulate(env.action_space()) + self.test_all_classes_from_file(env=env) + except NoForecastAvailable: + # cannot do this test if the "original" env is a _Forecast env: + # for l2rpn_case14_sandbox only 1 step ahead forecast are available + pass + + def test_all_classes_from_file_obsenv(self, env: Optional[Environment]=None): + """test the files are correctly generated for the "forecast env" in the + environment even after a 
call to obs.reset() and obs.simulate()""" + if not self._do_test_obs_env(): + self.skipTest("ObsEnv is not tested") + env = self._aux_make_env(env) + + self.test_all_classes_from_file(env=env.observation_space.obs_env, + name_action_cls="CompleteAction_{}", + name_observation_cls="CompleteObservation_{}") + + # reset and check the same + obs = env.reset() + self.test_all_classes_from_file(env=env.observation_space.obs_env, + name_action_cls="CompleteAction_{}", + name_observation_cls="CompleteObservation_{}") + self.test_all_classes_from_file(env=obs._obs_env, + name_action_cls="CompleteAction_{}", + name_observation_cls="CompleteObservation_{}") + + # forecast and check the same + try: + obs.simulate(env.action_space()) + self.test_all_classes_from_file(env=env.observation_space.obs_env, + name_action_cls="CompleteAction_{}", + name_observation_cls="CompleteObservation_{}") + self.test_all_classes_from_file(env=obs._obs_env, + name_action_cls="CompleteAction_{}", + name_observation_cls="CompleteObservation_{}") + except NoForecastAvailable: + # cannot do this test if the "original" env is a _Forecast env: + # for l2rpn_case14_sandbox only 1 step ahead forecast are available + pass + + def test_all_classes_from_file_env_cpy(self, env: Optional[Environment]=None): + """test that when an environment is copied, then the copied env is consistent, + that it is consistent after a reset and that the forecast env is consistent""" + if not self._do_test_copy(): + self.skipTest("Copy is not tested") + env = self._aux_make_env(env) + env_cpy = env.copy() + self.test_all_classes_from_file(env=env_cpy) + self.test_all_classes_from_file_env_after_reset(env=env_cpy) + self.test_all_classes_from_file(env=env_cpy.observation_space.obs_env, + name_action_cls="CompleteAction_{}", + name_observation_cls="CompleteObservation_{}" + ) + self.test_all_classes_from_file_obsenv(env=env_cpy) + + def test_all_classes_from_file_env_runner(self, env: Optional[Environment]=None): + """this test, 
using the defined functions above that the runner is able to create a valid env""" + if not self._do_test_runner(): + self.skipTest("Runner not tested") + env = self._aux_make_env(env) + runner = Runner(**env.get_params_for_runner()) + env_runner = runner.init_env() + self.test_all_classes_from_file(env=env_runner) + self.test_all_classes_from_file_env_after_reset(env=env_runner) + self.test_all_classes_from_file(env=env_runner.observation_space.obs_env, + name_action_cls="CompleteAction_{}", + name_observation_cls="CompleteObservation_{}") + self.test_all_classes_from_file_obsenv(env=env_runner) + + # test the runner prevents the deletion of the tmp file where the classes are stored + # path_cls = env._local_dir_cls + # del env + # assert os.path.exists(path_cls.name) + env_runner = runner.init_env() + self.test_all_classes_from_file(env=env_runner) + self.test_all_classes_from_file_env_after_reset(env=env_runner) + self.test_all_classes_from_file(env=env_runner.observation_space.obs_env, + name_action_cls="CompleteAction_{}", + name_observation_cls="CompleteObservation_{}") + self.test_all_classes_from_file_obsenv(env=env_runner) + + def test_all_classes_from_file_runner_1ep(self, env: Optional[Environment]=None): + """this test that the runner is able to "run" (one type of run), but the tests on the classes + are much lighter than in test_all_classes_from_file_env_runner""" + if not self._do_test_runner(): + self.skipTest("Runner not tested") + env = self._aux_make_env(env) + this_agent = _ThisAgentTest(env.action_space, + env._read_from_local_dir, + self._aux_get_obs_cls().format(self.get_env_name()), + self._aux_get_act_cls().format(self.get_env_name()), + ) + runner = Runner(**env.get_params_for_runner(), + agentClass=None, + agentInstance=this_agent) + runner.run(nb_episode=1, + max_iter=self.max_iter, + env_seeds=[0], + episode_id=[0]) + + def test_all_classes_from_file_runner_2ep_seq(self, env: Optional[Environment]=None): + """this test that the runner is 
able to "run" (one other type of run), but the tests on the classes + are much lighter than in test_all_classes_from_file_env_runner""" + if not self._do_test_runner(): + self.skipTest("Runner not tested") + env = self._aux_make_env(env) + this_agent = _ThisAgentTest(env.action_space, + env._read_from_local_dir, + self._aux_get_obs_cls().format(self.get_env_name()), + self._aux_get_act_cls().format(self.get_env_name()), + ) + runner = Runner(**env.get_params_for_runner(), + agentClass=None, + agentInstance=this_agent) + res = runner.run(nb_episode=2, + max_iter=self.max_iter, + env_seeds=[0, 0], + episode_id=[0, 1]) + assert res[0][4] == self.max_iter + assert res[1][4] == self.max_iter + + def test_all_classes_from_file_runner_2ep_par_fork(self, env: Optional[Environment]=None): + """this test that the runner is able to "run" (one other type of run), but the tests on the classes + are much lighter than in test_all_classes_from_file_env_runner""" + if not self._do_test_runner(): + self.skipTest("Runner not tested") + if _IS_WINDOWS: + self.skipTest("no fork on windows") + env = self._aux_make_env(env) + this_agent = _ThisAgentTest(env.action_space, + env._read_from_local_dir, + self._aux_get_obs_cls().format(self.get_env_name()), + self._aux_get_act_cls().format(self.get_env_name()), + ) + ctx = mp.get_context('fork') + runner = Runner(**env.get_params_for_runner(), + agentClass=None, + agentInstance=this_agent, + mp_context=ctx) + res = runner.run(nb_episode=2, + nb_process=2, + max_iter=self.max_iter, + env_seeds=[0, 0], + episode_id=[0, 1]) + assert res[0][4] == self.max_iter + assert res[1][4] == self.max_iter + + def test_all_classes_from_file_runner_2ep_par_spawn(self, env: Optional[Environment]=None): + """this test that the runner is able to "run" (one other type of run), but the tests on the classes + are much lighter than in test_all_classes_from_file_env_runner""" + if not self._do_test_runner(): + self.skipTest("Runner not tested") + env = 
self._aux_make_env(env) + this_agent = _ThisAgentTest(env.action_space, + env._read_from_local_dir, + self._aux_get_obs_cls().format(self.get_env_name()), + self._aux_get_act_cls().format(self.get_env_name()), + ) + ctx = mp.get_context('spawn') + runner = Runner(**env.get_params_for_runner(), + agentClass=None, + agentInstance=this_agent, + mp_context=ctx) + res = runner.run(nb_episode=2, + nb_process=2, + max_iter=self.max_iter, + env_seeds=[0, 0], + episode_id=[0, 1]) + assert res[0][4] == self.max_iter + assert res[1][4] == self.max_iter + + +class MaskedEnvAutoClassTester(AutoClassInFileTester): + + def _aux_make_env(self, env: Optional[Environment]=None): + if env is None: + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + env = MaskedEnvironment(super()._aux_make_env(), + lines_of_interest=np.array([True, True, True, True, True, True, + False, False, False, False, False, False, + False, False, False, False, False, False, + False, False])) + return env + + +class TOEnvAutoClassTester(AutoClassInFileTester): + + def _aux_make_env(self, env: Optional[Environment]=None): + if env is None: + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + env = TimedOutEnvironment(super()._aux_make_env(), + time_out_ms=1e-3) + return env + + +class ForEnvAutoClassTester(AutoClassInFileTester): + + def _aux_make_env(self, env: Optional[Environment]=None): + if env is None: + # we create the reference environment and prevent grid2op to + # to delete it (because it stores the files to the class) + self.ref_env = super()._aux_make_env() + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + obs = self.ref_env.get_obs() + res = obs.get_forecast_env() + self.max_iter = res._max_iter # otherwise it fails in the runner + else: + res = env + return res + + def tearDown(self): + if hasattr(self, "ref_env"): + self.ref_env.close() + + +# class SEMPAUtoClassTester(AutoClassInFileTester): +# """means i need to completely recode 
`test_all_classes_from_file` to take into account the return +# values which is a list now... and i'm not ready for it yet TODO""" +# def _do_test_runner(self): +# # false for multi process env +# return False + +# def _do_test_copy(self): +# # for for multi process env +# return False + +# def _do_test_obs_env(self): +# return False + +# def _aux_make_env(self, env: Optional[Environment]=None): +# if env is None: +# # we create the reference environment and prevent grid2op to +# # to delete it (because it stores the files to the class) +# self.ref_env = super()._aux_make_env() +# with warnings.catch_warnings(): +# warnings.filterwarnings("ignore") +# res = SingleEnvMultiProcess(self.ref_env, nb_env=2) +# else: +# res = env +# return res + +class GymEnvAutoClassTester(unittest.TestCase): + def setUp(self) -> None: + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + self.env = grid2op.make("l2rpn_case14_sandbox", + test=True, + class_in_file=True) + self.line_id = 3 + th_lim = self.env.get_thermal_limit() * 2. # avoid all problem in general + th_lim[self.line_id] /= 10. 
# make sure to get trouble in line 3 + self.env.set_thermal_limit(th_lim) + + GymEnvAutoClassTester._init_env(self.env) + + @staticmethod + def _init_env(env): + env.set_id(0) + env.seed(0) + env.reset() + + def tearDown(self) -> None: + self.env.close() + return super().tearDown() + + def _aux_run_envs(self, act, env_gym): + for i in range(10): + obs_in, reward, done, truncated, info = env_gym.step(act) + if i < 2: # 2 : 2 full steps already + assert obs_in["timestep_overflow"][self.line_id] == i + 1, f"error for step {i}: {obs_in['timestep_overflow'][self.line_id]}" + else: + # cooldown applied for line 3: + # - it disconnect stuff in `self.env_in` + # - it does not affect anything in `self.env_out` + assert not obs_in["line_status"][self.line_id] + + def test_gym_with_step(self): + """test the step function also disconnects (or not) the lines""" + env_gym = GymEnv(self.env) + act = {} + self._aux_run_envs(act, env_gym) + env_gym.reset() + self._aux_run_envs(act, env_gym) + + def test_gym_normal(self): + """test I can create the gym env""" + env_gym = GymEnv(self.env) + env_gym.reset() + + def test_gym_box(self): + """test I can create the gym env with box ob space and act space""" + env_gym = GymEnv(self.env) + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + env_gym.action_space = BoxGymActSpace(self.env.action_space) + env_gym.observation_space = BoxGymObsSpace(self.env.observation_space) + env_gym.reset() + + def test_gym_discrete(self): + """test I can create the gym env with discrete act space""" + env_gym = GymEnv(self.env) + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + env_gym.action_space = DiscreteActSpace(self.env.action_space) + env_gym.reset() + act = 0 + self._aux_run_envs(act, env_gym) + + def test_gym_multidiscrete(self): + """test I can create the gym env with multi discrete act space""" + env_gym = GymEnv(self.env) + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + 
env_gym.action_space = MultiDiscreteActSpace(self.env.action_space) + env_gym.reset() + act = env_gym.action_space.sample() + act[:] = 0 + self._aux_run_envs(act, env_gym) + + def test_asynch_fork(self): + if _IS_WINDOWS: + self.skipTest("no fork on windows") + async_vect_env = AsyncVectorEnv((lambda: GymEnv(self.env), lambda: GymEnv(self.env)), + context="fork") + obs = async_vect_env.reset() + + def test_asynch_spawn(self): + async_vect_env = AsyncVectorEnv((lambda: GymEnv(self.env), lambda: GymEnv(self.env)), + context="spawn") + obs = async_vect_env.reset() + + +class MultiMixEnvAutoClassTester(AutoClassInFileTester): + def _aux_get_obs_cls(self): + return "ObservationNeurips2020_{}" + + def _aux_get_act_cls(self): + return "ActionNeurips2020_{}" + + def get_env_name(self): + return "l2rpn_neurips_2020_track2" + # TODO gym for that too + + # def _do_test_runner(self): + # return False + + def test_all_classes_from_file(self, + env: Optional[Environment]=None, + classes_name=None, + name_complete_obs_cls="CompleteObservation_{}", + name_observation_cls=None, + name_action_cls=None): + env_orig = env + env = self._aux_make_env(env) + try: + super().test_all_classes_from_file(env, + classes_name=classes_name, + name_complete_obs_cls=name_complete_obs_cls, + name_observation_cls=name_observation_cls, + name_action_cls=name_action_cls + ) + if isinstance(env, MultiMixEnvironment): + # test each mix of a multi mix + for mix in env: + super().test_all_classes_from_file(mix, + classes_name=classes_name, + name_complete_obs_cls=name_complete_obs_cls, + name_observation_cls=name_observation_cls, + name_action_cls=name_action_cls + ) + finally: + if env_orig is None: + # need to clean the env I created + env.close() + + def test_all_classes_from_file_env_after_reset(self, env: Optional[Environment]=None): + env_orig = env + env = self._aux_make_env(env) + try: + super().test_all_classes_from_file_env_after_reset(env) + if isinstance(env, MultiMixEnvironment): + # test 
each mix of a multimix + for mix in env: + super().test_all_classes_from_file_env_after_reset(mix) + finally: + if env_orig is None: + # need to clean the env I created + env.close() + + def test_all_classes_from_file_obsenv(self, env: Optional[Environment]=None): + env_orig = env + env = self._aux_make_env(env) + try: + super().test_all_classes_from_file_obsenv(env) + if isinstance(env, MultiMixEnvironment): + # test each mix of a multimix + for mix in env: + super().test_all_classes_from_file_obsenv(mix) + finally: + if env_orig is None: + # need to clean the env I created + env.close() + + def test_all_classes_from_file_env_cpy(self, env: Optional[Environment]=None): + env_orig = env + env = self._aux_make_env(env) + try: + super().test_all_classes_from_file_env_cpy(env) + if isinstance(env, MultiMixEnvironment): + # test each mix of a multimix + for mix in env: + super().test_all_classes_from_file_env_cpy(mix) + finally: + if env_orig is None: + # need to clean the env I created + env.close() + + def test_all_classes_from_file_env_runner(self, env: Optional[Environment]=None): + env_orig = env + env = self._aux_make_env(env) + try: + if isinstance(env, MultiMixEnvironment): + # test each mix of a multimix + for mix in env: + super().test_all_classes_from_file_env_runner(mix) + else: + # runner does not handle multimix + super().test_all_classes_from_file_env_runner(env) + finally: + if env_orig is None: + # need to clean the env I created + env.close() + + def test_all_classes_from_file_runner_1ep(self, env: Optional[Environment]=None): + env_orig = env + env = self._aux_make_env(env) + try: + if isinstance(env, MultiMixEnvironment): + # test each mix of a multimix + for mix in env: + super().test_all_classes_from_file_runner_1ep(mix) + else: + # runner does not handle multimix + super().test_all_classes_from_file_runner_1ep(env) + finally: + if env_orig is None: + # need to clean the env I created + env.close() + + def 
test_all_classes_from_file_runner_2ep_seq(self, env: Optional[Environment]=None): + env_orig = env + env = self._aux_make_env(env) + try: + if isinstance(env, MultiMixEnvironment): + # test each mix of a multimix + for mix in env: + super().test_all_classes_from_file_runner_2ep_seq(mix) + else: + # runner does not handle multimix + super().test_all_classes_from_file_runner_2ep_seq(env) + finally: + if env_orig is None: + # need to clean the env I created + env.close() + + def test_all_classes_from_file_runner_2ep_par_fork(self, env: Optional[Environment]=None): + if _IS_WINDOWS: + self.skipTest("no fork on windows") + env_orig = env + env = self._aux_make_env(env) + try: + if isinstance(env, MultiMixEnvironment): + # test each mix of a multimix + for mix in env: + super().test_all_classes_from_file_runner_2ep_par_fork(mix) + else: + # runner does not handle multimix + super().test_all_classes_from_file_runner_2ep_par_fork(env) + finally: + if env_orig is None: + # need to clean the env I created + env.close() + + def test_all_classes_from_file_runner_2ep_par_spawn(self, env: Optional[Environment]=None): + env_orig = env + env = self._aux_make_env(env) + try: + if isinstance(env, MultiMixEnvironment): + # test each mix of a multimix + for mix in env: + super().test_all_classes_from_file_runner_2ep_par_spawn(mix) + else: + # runner does not handle multimix + super().test_all_classes_from_file_runner_2ep_par_spawn(env) + finally: + if env_orig is None: + # need to clean the env I created + env.close() + + def test_forecast_env_basic(self, env: Optional[Environment]=None): + env_orig = env + env = self._aux_make_env(env) + try: + if isinstance(env, MultiMixEnvironment): + # test each mix of a multimix + for mix in env: + obs = mix.reset() + for_env = obs.get_forecast_env() + super().test_all_classes_from_file(for_env) + finally: + if env_orig is None: + # need to clean the env I created + env.close() + + +if __name__ == "__main__": + unittest.main() diff --git 
a/grid2op/tests/test_fromChronix2grid.py b/grid2op/tests/fromChronix2grid.py similarity index 100% rename from grid2op/tests/test_fromChronix2grid.py rename to grid2op/tests/fromChronix2grid.py diff --git a/grid2op/tests/test_ChronicsHandler.py b/grid2op/tests/test_ChronicsHandler.py index c19ad2164..1fefb2bc0 100644 --- a/grid2op/tests/test_ChronicsHandler.py +++ b/grid2op/tests/test_ChronicsHandler.py @@ -1122,7 +1122,7 @@ def setUp(self): with warnings.catch_warnings(): warnings.filterwarnings("ignore") self.env = grid2op.make("rte_case14_realistic", test=True, _add_to_name=type(self).__name__) - self.env.chronics_handler.set_max_iter(self.max_iter) + self.env.set_max_iter(self.max_iter) def tearDown(self): self.env.close() @@ -1183,7 +1183,7 @@ def test_load_still(self): ) as env: # test a first time without chunks env.set_id(0) - env.chronics_handler.set_max_iter(max_iter) + env.set_max_iter(max_iter) obs = env.reset() # check that simulate is working diff --git a/grid2op/tests/test_CompactEpisodeData.py b/grid2op/tests/test_CompactEpisodeData.py index 5fcdeeeae..11f9dec78 100644 --- a/grid2op/tests/test_CompactEpisodeData.py +++ b/grid2op/tests/test_CompactEpisodeData.py @@ -260,6 +260,40 @@ def test_with_opponent(self): lines_impacted, subs_impacted = episode_data.attack_space.from_vect(episode_data.attacks[0]).get_topological_impact() assert lines_impacted[3] + def test_can_return_ep_data(self): + # One episode + res = self.runner.run(nb_episode=1, + episode_id=[0], + env_seeds=[0], + max_iter=self.max_iter, + add_detailed_output=True, + nb_process=1 + ) + for el in res: + assert isinstance(el[-1], CompactEpisodeData) + + # 2 episodes, sequential mode + res = self.runner.run(nb_episode=2, + episode_id=[0, 1], + env_seeds=[0, 1], + max_iter=self.max_iter, + add_detailed_output=True, + nb_process=1 + ) + for el in res: + assert isinstance(el[-1], CompactEpisodeData) + + # 2 episodes, parrallel mode + res = self.runner.run(nb_episode=2, + episode_id=[0, 1], + 
env_seeds=[0, 1], + max_iter=self.max_iter, + add_detailed_output=True, + nb_process=2 + ) + for el in res: + assert isinstance(el[-1], CompactEpisodeData) + if __name__ == "__main__": unittest.main() diff --git a/grid2op/tests/test_MultiMix.py b/grid2op/tests/test_MultiMix.py index 9ee05802f..0f66ed0b0 100644 --- a/grid2op/tests/test_MultiMix.py +++ b/grid2op/tests/test_MultiMix.py @@ -297,11 +297,10 @@ def test_forecast_toggle(self): def test_bracket_access_by_name(self): mme = MultiMixEnvironment(PATH_DATA_MULTIMIX, _test=True) - mix1_env = mme["case14_001"] - assert mix1_env.name == "case14_001" + assert mix1_env.multimix_mix_name == "case14_001" mix2_env = mme["case14_002"] - assert mix2_env.name == "case14_002" + assert mix2_env.multimix_mix_name == "case14_002" with self.assertRaises(KeyError): unknown_env = mme["unknown_raise"] @@ -312,7 +311,7 @@ def test_keys_access(self): mix = mme[k] assert mix is not None assert isinstance(mix, BaseEnv) - assert mix.name == k + assert mix.multimix_mix_name == k def test_values_access(self): mme = MultiMixEnvironment(PATH_DATA_MULTIMIX, _test=True) @@ -320,7 +319,7 @@ def test_values_access(self): for v in mme.values(): assert v is not None assert isinstance(v, BaseEnv) - assert v == mme[v.name] + assert v == mme[v.multimix_mix_name] def test_values_unique(self): mme = MultiMixEnvironment(PATH_DATA_MULTIMIX, _test=True) diff --git a/grid2op/tests/test_PandaPowerBackendDefaultFunc.py b/grid2op/tests/test_PandaPowerBackendDefaultFunc.py index 847f1d3bb..33a290119 100644 --- a/grid2op/tests/test_PandaPowerBackendDefaultFunc.py +++ b/grid2op/tests/test_PandaPowerBackendDefaultFunc.py @@ -64,7 +64,7 @@ def get_topo_vect(self): """ otherwise there are some infinite recursions """ - res = np.full(self.dim_topo, fill_value=np.NaN, dtype=dt_int) + res = np.full(self.dim_topo, fill_value=-1, dtype=dt_int) line_status = np.concatenate( ( diff --git a/grid2op/tests/test_Runner.py b/grid2op/tests/test_Runner.py index 
71446f2ba..0bca1dc73 100644 --- a/grid2op/tests/test_Runner.py +++ b/grid2op/tests/test_Runner.py @@ -13,6 +13,7 @@ import pdb import packaging from packaging import version +import inspect from grid2op.tests.helper_path_test import * @@ -518,6 +519,7 @@ def test_backward_compatibility(self): "1.9.8", "1.10.0", "1.10.1", + "1.10.2", ] curr_version = "test_version" assert ( @@ -636,6 +638,28 @@ def test_legal_ambiguous_nofaststorage(self): assert ep_data.ambiguous[1] assert not ep_data.ambiguous[2] assert not ep_data.ambiguous[3] + + def test_get_params(self): + """test the runner._get_params() function (used in multiprocessing context) + can indeed make a runner with all its arguments modified (proper 'copy' of the runner) + """ + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + env = grid2op.make("l2rpn_case14_sandbox", test=True, chronics_class=ChangeNothing, + _add_to_name=type(self).__name__) + + runner = Runner(**env.get_params_for_runner(), agentClass=AgentTestLegalAmbiguous) + made_params = runner._get_params() + ok_params = inspect.signature(Runner.__init__).parameters + for k in made_params.keys(): + assert k in ok_params, f"params {k} is returned in runner._get_params() but cannot be used to make a runner" + + for k in ok_params.keys(): + if k == "self": + continue + assert k in made_params, f"params {k} is used to make a runner but is not returned in runner._get_params()" + + if __name__ == "__main__": diff --git a/grid2op/tests/test_action_set_orig_state_options.py b/grid2op/tests/test_action_set_orig_state_options.py index 8e142a302..e42dcf680 100644 --- a/grid2op/tests/test_action_set_orig_state_options.py +++ b/grid2op/tests/test_action_set_orig_state_options.py @@ -380,10 +380,9 @@ def test_run_two_eps_seq_two_acts(self, nb_process=1): {"set_line_status": [(1, 1)], "method": "ignore"}], episode_id=[0, 1], max_iter=self.max_iter, - add_detailed_output=True, + add_detailed_output=True, # TODO HERE HERE nb_process=nb_process ) - # 
check for ep 0 ep_data = res[0][-1] init_obs = ep_data.observations[0] @@ -404,7 +403,7 @@ def test_run_two_eps_seq_two_acts(self, nb_process=1): {"set_line_status": [(1, 1)], "method": "ignore"}), episode_id=[0, 1], max_iter=self.max_iter, - add_detailed_output=True, + add_detailed_output=True, nb_process=nb_process ) # check for ep 0 diff --git a/grid2op/tests/test_basic_env_ls.py b/grid2op/tests/test_basic_env_ls.py index c3214a26f..1e1496ae1 100644 --- a/grid2op/tests/test_basic_env_ls.py +++ b/grid2op/tests/test_basic_env_ls.py @@ -132,8 +132,8 @@ def test_runner(self): res_in, *_ = runner_in.run(nb_episode=1, max_iter=self.max_iter, env_seeds=[0], episode_id=[0], add_detailed_output=True) res_in2, *_ = runner_in.run(nb_episode=1, max_iter=self.max_iter, env_seeds=[0], episode_id=[0]) # check correct results are obtained when agregated - assert res_in[3] == 10 - assert res_in2[3] == 10 + assert res_in[3] == self.max_iter, f"{res_in[3]} vs {self.max_iter}" + assert res_in2[3] == self.max_iter, f"{res_in[3]} vs {self.max_iter}" assert np.allclose(res_in[2], 645.4992065) assert np.allclose(res_in2[2], 645.4992065) diff --git a/grid2op/tests/test_chronics_npy.py b/grid2op/tests/test_chronics_npy.py index f1173a980..7bf98ee11 100644 --- a/grid2op/tests/test_chronics_npy.py +++ b/grid2op/tests/test_chronics_npy.py @@ -29,7 +29,9 @@ def setUp(self): self.env_name = "l2rpn_case14_sandbox" with warnings.catch_warnings(): warnings.filterwarnings("ignore") - self.env_ref = grid2op.make(self.env_name, test=True, _add_to_name=type(self).__name__) + self.env_ref = grid2op.make(self.env_name, + test=True, + _add_to_name=type(self).__name__) self.load_p = 1.0 * self.env_ref.chronics_handler.real_data.data.load_p self.load_q = 1.0 * self.env_ref.chronics_handler.real_data.data.load_q @@ -105,7 +107,7 @@ def test_proper_start_end_2(self): ), f"error at iteration {ts}" obs, *_ = env.step(env.action_space()) assert np.all(obs_ref.gen_p == obs.gen_p), f"error at iteration {ts}" - 
assert obs.max_step == END + assert obs.max_step == END - LAG, f"{obs.max_step} vs {END - LAG}" with self.assertRaises(Grid2OpException): env.step( env.action_space() diff --git a/grid2op/tests/test_generate_classes.py b/grid2op/tests/test_generate_classes.py index f88cdcfd8..981592485 100644 --- a/grid2op/tests/test_generate_classes.py +++ b/grid2op/tests/test_generate_classes.py @@ -20,11 +20,12 @@ class TestGenerateFile(unittest.TestCase): def _aux_assert_exists_then_delete(self, env): if isinstance(env, MultiMixEnvironment): - for mix in env: - self._aux_assert_exists_then_delete(mix) + # for mix in env: + # self._aux_assert_exists_then_delete(mix) + self._aux_assert_exists_then_delete(env.mix_envs[0]) elif isinstance(env, Environment): path = Path(env.get_path_env()) / "_grid2op_classes" - assert path.exists() + assert path.exists(), f"path {path} does not exists" shutil.rmtree(path, ignore_errors=True) else: raise RuntimeError("Unknown env type") @@ -37,33 +38,37 @@ def list_env(self): def test_can_generate(self): for env_nm in self.list_env(): - with warnings.catch_warnings(): - warnings.filterwarnings("ignore") - env = grid2op.make(env_nm, test=True, _add_to_name=type(self).__name__+"test_generate") - env.generate_classes() - self._aux_assert_exists_then_delete(env) - env.close() + try: + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + env = grid2op.make(env_nm, test=True, _add_to_name=type(self).__name__+"test_generate") + env.generate_classes() + finally: + self._aux_assert_exists_then_delete(env) + env.close() def test_can_load(self): + _add_to_name = type(self).__name__+"test_load" for env_nm in self.list_env(): with warnings.catch_warnings(): warnings.filterwarnings("ignore") - env = grid2op.make(env_nm, test=True, _add_to_name=type(self).__name__+"_TestGenerateFile") + env = grid2op.make(env_nm, + test=True, + _add_to_name=_add_to_name) env.generate_classes() - with warnings.catch_warnings(): warnings.filterwarnings("ignore") 
try: env2 = grid2op.make(env_nm, test=True, experimental_read_from_local_dir=True, - _add_to_name=type(self).__name__) + _add_to_name=_add_to_name) env2.close() except RuntimeError as exc_: raise RuntimeError(f"Error for {env_nm}") from exc_ self._aux_assert_exists_then_delete(env) env.close() + if __name__ == "__main__": unittest.main() - \ No newline at end of file diff --git a/grid2op/tests/test_gym_asynch_env.py b/grid2op/tests/test_gym_asynch_env.py index e0cca4c75..c9eb7eb1d 100644 --- a/grid2op/tests/test_gym_asynch_env.py +++ b/grid2op/tests/test_gym_asynch_env.py @@ -181,5 +181,6 @@ def setUp(self) -> None: self.skipTest("Not handled at the moment") return super().setUp() + if __name__ == "__main__": unittest.main() diff --git a/grid2op/tests/test_issue_196.py b/grid2op/tests/test_issue_196.py index c6a4b815d..08f5987d5 100644 --- a/grid2op/tests/test_issue_196.py +++ b/grid2op/tests/test_issue_196.py @@ -49,3 +49,7 @@ def test_issue_196_genp(self): # not great test as it passes with the bug... but just in the case... cannot hurt obs, *_ = self.env_gym.reset() assert obs in self.env_gym.observation_space + + +if __name__ == "__main__": + unittest.main() diff --git a/grid2op/tests/test_issue_616.py b/grid2op/tests/test_issue_616.py new file mode 100644 index 000000000..6a779da33 --- /dev/null +++ b/grid2op/tests/test_issue_616.py @@ -0,0 +1,320 @@ +# Copyright (c) 2024, RTE (https://www.rte-france.com) +# See AUTHORS.txt and https://github.com/rte-france/Grid2Op/pull/319 +# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0. +# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file, +# you can obtain one at http://mozilla.org/MPL/2.0/. +# SPDX-License-Identifier: MPL-2.0 +# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems. 
+ +import unittest +import grid2op +import tempfile +import numpy as np +import re +import os +import json +import warnings + +from grid2op.Chronics import (MultifolderWithCache, + GridStateFromFileWithForecastsWithMaintenance, + FromHandlers) +from grid2op.Chronics.handlers import (CSVHandler, + NoisyForecastHandler, + LoadQFromPHandler, + JSONMaintenanceHandler) + +from grid2op.Runner import Runner + + +class Issue616Tester(unittest.TestCase): + def setUp(self): + self.env_name = "l2rpn_case14_sandbox" + # create first env + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + env = grid2op.make(self.env_name, + test=True) + + # hack for adding maintenance + dict_maint = { + "maintenance_starting_hour": 1, + "maintenance_ending_hour": 2, + "line_to_maintenance": ["1_2_2", "1_4_4", "9_10_12", "12_13_14"], + "daily_proba_per_month_maintenance": [0.7 for _ in range(12)], + "max_daily_number_per_month_maintenance": [1 for _ in range(12)], + "maintenance_day_of_week": list(range(7)) + } + self.tmp_files = [os.path.join(env.get_path_env(), + "chronics", "0000", "maintenance_meta.json"), + os.path.join(env.get_path_env(), + "chronics", "0001", "maintenance_meta.json"), + os.path.join(env.get_path_env(), + "chronics", "0000", "maintenance_meta.json"), + ] + for path in self.tmp_files: + with open(path, "w", encoding="utf-8") as f: + json.dump(fp=f, obj=dict_maint) + env.close() + # create the env with the maintenance + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + self.env_bug = grid2op.make(self.env_name, + chronics_class=MultifolderWithCache, + data_feeding_kwargs={"gridvalueClass": GridStateFromFileWithForecastsWithMaintenance}, + test=True + ) + self.env_bug.chronics_handler.reset() + + # store the normal maintenance schedule: + self.maint_ref = (np.array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, + 11, 288, 289, 290, 291, 292, 293, 294, 295, 296, 297, + 298, 299]) + 12, + np.array([4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 2, 2, 2, 2, 2, 
2, + 2, 2, 2, 2, 2, 2])) + + def tearDown(self) -> None: + self.env_bug.close() + for el in self.tmp_files: + if os.path.exists(el): + os.remove(el) + return super().tearDown() + + def test_reset(self): + """test that the seed is used correctly in env.reset""" + obs = self.env_bug.reset(seed=0, options={"time serie id": 0}) + maint_ref = 1. * self.env_bug.chronics_handler.real_data.data.maintenance + + obs = self.env_bug.reset(seed=1, options={"time serie id": 0}) + maint_1 = 1. * self.env_bug.chronics_handler.real_data.data.maintenance + + obs = self.env_bug.reset(seed=0, options={"time serie id": 0}) + maint_0 = 1. * self.env_bug.chronics_handler.real_data.data.maintenance + + assert (maint_ref == maint_0).all() + assert (maint_ref != maint_1).any() + assert (maint_ref.nonzero()[0] == self.maint_ref[0]).all() + assert (maint_ref.nonzero()[1] == self.maint_ref[1]).all() + + def test_runner(self): + """test the runner behaves correctly""" + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + runner = Runner(**self.env_bug.get_params_for_runner()) + res = runner.run(nb_episode=3, + env_seeds=[0, 1, 0], + max_iter=5, + add_detailed_output=True) + + maint_ref = np.array([ -1, -1, 300, -1, 12, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], + dtype=np.int32) + assert (res[0][-1].observations[0].time_next_maintenance == maint_ref).all() + assert (res[0][-1].observations[0].time_next_maintenance != res[1][-1].observations[0].time_next_maintenance).any() + assert (res[0][-1].observations[0].time_next_maintenance == res[2][-1].observations[0].time_next_maintenance).all() + + def test_chronics_handler_twice_reset(self): + """test the same results is obtained if the chronics handler is reset twice""" + obs = self.env_bug.reset(seed=0, options={"time serie id": 0}) + maint_ref = 1. 
* self.env_bug.chronics_handler.real_data.data.maintenance + assert (maint_ref.nonzero()[0] == self.maint_ref[0]).all() + assert (maint_ref.nonzero()[1] == self.maint_ref[1]).all() + + self.env_bug.chronics_handler.reset() + maint_ref = 1. * self.env_bug.chronics_handler.real_data.data.maintenance + assert (maint_ref.nonzero()[0] == self.maint_ref[0]).all() + assert (maint_ref.nonzero()[1] == self.maint_ref[1]).all() + + +class Issue616WithHandlerTester(unittest.TestCase): + def setUp(self): + self.env_name = "l2rpn_case14_sandbox" + hs_ = [5*(i+1) for i in range(12)] + + # create first env + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + env = grid2op.make(self.env_name, + test=True) + + # hack for adding maintenance + dict_maint = { + "maintenance_starting_hour": 1, + "maintenance_ending_hour": 2, + "line_to_maintenance": ["1_2_2", "1_4_4", "9_10_12", "12_13_14"], + "daily_proba_per_month_maintenance": [0.7 for _ in range(12)], + "max_daily_number_per_month_maintenance": [1 for _ in range(12)], + "maintenance_day_of_week": list(range(7)) + } + self.tmp_json = tempfile.NamedTemporaryFile(dir=os.path.join(env.get_path_env(), "chronics", "0000"), + prefix="maintenance_meta", + suffix=".json") + with open(self.tmp_json.name, "w", encoding="utf-8") as f: + json.dump(fp=f, obj=dict_maint) + + # uses the default noise: sqrt(horizon) * 0.01 : error of 8% 1h ahead + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + self.env_bug = grid2op.make(self.env_name, + chronics_class=MultifolderWithCache, + data_feeding_kwargs={"gridvalueClass": FromHandlers, + "gen_p_handler": CSVHandler("prod_p"), + "load_p_handler": CSVHandler("load_p"), + "gen_v_handler": CSVHandler("prod_v"), + "load_q_handler": LoadQFromPHandler("load_q"), + "h_forecast": hs_, + "maintenance_handler": JSONMaintenanceHandler(json_file_name=self.tmp_json.name), + "gen_p_for_handler": NoisyForecastHandler("prod_p_forecasted"), + "load_p_for_handler": 
NoisyForecastHandler("load_p_forecasted"), + "load_q_for_handler": NoisyForecastHandler("load_q_forecasted"), + }, + test=True + ) + self.env_bug.chronics_handler.reset() + + # store the normal maintenance schedule: + self.maint_ref = (np.array([ 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, + 23, 300, 301, 302, 303, 304, 305, 306, 307, 308, 309, + 310, 311]), + np.array([12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 14, 14, 14, 14, 14, + 14, 14, 14, 14, 14, 14, 14])) + + self.load_p_ref = np.array([[22. , 87. , 45.79999924, 7. , 12. , + 28.20000076, 8.69999981, 3.5 , 5.5 , 12.69999981, + 14.80000019], + [22.44357109, 90.38361359, 46.61357117, 7.00726891, 12.49121857, + 28.84151268, 8.93680668, 3.45285726, 5.58550406, 13.10054588, + 15.43630219], + [22.48419762, 89.22782135, 45.57607269, 6.98833132, 12.35618019, + 28.45972633, 9.01393414, 3.44352579, 5.57040882, 12.96386147, + 15.2933054 ], + [21.85004234, 86.51035309, 44.29330063, 6.82195902, 11.86427689, + 28.2765255 , 8.79933834, 3.36154509, 5.33892441, 12.65522861, + 14.92921543], + [21.61282349, 86.64777374, 44.50276947, 6.68032742, 11.88705349, + 27.90019035, 8.84160995, 3.34016371, 5.30496597, 12.57473373, + 14.63777542], + [23.22621727, 92.27429962, 47.29320145, 7.25162458, 12.71661758, + 30.16255379, 9.24844837, 3.57326436, 5.57008839, 13.34719276, + 15.97459316], + [20.23793983, 81.04374695, 42.03972244, 6.25536346, 10.85489559, + 26.03334999, 8.0951767 , 3.12768173, 5.05948496, 11.49882984, + 13.89058685], + [19.92967606, 81.96430206, 41.73068237, 6.54965878, 11.13441944, + 26.10506821, 8.04672432, 3.08769631, 4.95902777, 11.50868607, + 13.94141674], + [20.64870644, 83.94567871, 42.16581726, 6.56127167, 11.38573551, + 27.0170002 , 8.39456749, 3.1841464 , 5.21042156, 11.96467113, + 14.37690353], + [19.72007751, 79.25064087, 40.82889175, 6.11044645, 10.83215523, + 25.83052444, 7.77693176, 3.05522323, 4.814291 , 11.5728159 , + 13.9799614 ], + [21.79347801, 87.17391205, 42.77978897, 6.76001358, 11.70390511, 
+ 28.14990807, 8.67703247, 3.32955885, 5.24657774, 12.30927849, + 14.83167171], + [19.81615639, 78.61643982, 40.09531021, 6.11152506, 10.64886951, + 25.27948952, 7.87090397, 2.96316385, 4.72254229, 11.20446301, + 13.88982964], + [19.3391819 , 77.26506805, 39.22829056, 6.04922247, 10.44865608, + 24.83847427, 7.8823204 , 2.93295646, 4.76605368, 11.18189621, + 13.19830322]]) + + self.load_q_ref = np.array([15.4 , 60.899998 , 32.059998 , 4.9 , 8.4 , + 19.74 , 6.0899997, 2.45 , 3.85 , 8.889999 , + 10.36 ], dtype=np.float32) + + def tearDown(self) -> None: + self.env_bug.close() + self.tmp_json.close() + return super().tearDown() + + def test_reset(self): + """test that the seed is used correctly in env.reset""" + obs = self.env_bug.reset(seed=0, options={"time serie id": 0}) + maint_ref = 1. * self.env_bug.chronics_handler.real_data.data.maintenance_handler.maintenance + load_q_ref = 1. * obs.load_q + load_p_ref = 1. * obs.get_forecast_arrays()[0] + + obs = self.env_bug.reset(seed=1, options={"time serie id": 0}) + maint_1 = 1. * self.env_bug.chronics_handler.real_data.data.maintenance_handler.maintenance + load_q_1 = 1. * obs.load_q + load_p_1= 1. * obs.get_forecast_arrays()[0] + + obs = self.env_bug.reset(seed=0, options={"time serie id": 0}) + maint_0 = 1. * self.env_bug.chronics_handler.real_data.data.maintenance_handler.maintenance + load_q_0 = 1. * obs.load_q + load_p_0 = 1. * obs.get_forecast_arrays()[0] + + # maintenance, so JSONMaintenanceHandler + assert (maint_ref == maint_0).all() + assert (maint_ref != maint_1).any() + assert (maint_ref.nonzero()[0] == self.maint_ref[0]).all() + assert (maint_ref.nonzero()[1] == self.maint_ref[1]).all() + + # load_q, so LoadQFromPHandler + assert (load_q_ref == load_q_0).all() + # assert (load_q_ref != load_q_1).any() # it's normal it works as this is not random ! 
+ assert (load_q_ref == self.load_q_ref).all() + + # load_p_forecasted, so NoisyForecastHandler + assert (load_p_ref == load_p_0).all() + assert (load_p_ref != load_p_1).any() + assert (np.abs(load_p_ref - self.load_p_ref) <= 1e-6).all() + + def test_runner(self): + """test the runner behaves correctly""" + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + runner = Runner(**self.env_bug.get_params_for_runner()) + res = runner.run(nb_episode=3, + env_seeds=[0, 1, 0], + max_iter=5, + add_detailed_output=True) + obs = res[0][-1].observations[0] + maint_ref = 1. * obs.time_next_maintenance + load_q_ref = 1. * obs.load_q + # load_p_ref = 1. * obs.get_forecast_arrays()[0] not present in episodeData + + obs = res[1][-1].observations[0] + maint_1 = 1. * obs.time_next_maintenance + load_q_1 = 1. * obs.load_q + # load_p_1 = 1. * obs.get_forecast_arrays()[0] not present in episodeData + + obs = res[2][-1].observations[0] + maint_0 = 1. * obs.time_next_maintenance + load_q_0 = 1. * obs.load_q + # load_p_0 = 1. * obs.get_forecast_arrays()[0] not present in episodeData + + # maintenance, so JSONMaintenanceHandler + assert (maint_ref == maint_0).all() + assert (maint_ref != maint_1).any() + # TODO test against a reference data stored in the file + + # load_q, so LoadQFromPHandler + assert (load_q_ref == load_q_0).all() + # assert (load_q_ref != load_q_1).any() # it's normal it works as this is not random ! + assert (load_q_ref == self.load_q_ref).all() + + # load_p_forecasted, so NoisyForecastHandler + # assert (load_p_ref == load_p_0).all() + # assert (load_p_ref != load_p_1).any() + # TODO test that with an agent + + def test_chronics_handler_twice_reset(self): + """test the same results is obtained if the chronics handler is reset twice""" + obs = self.env_bug.reset(seed=0, options={"time serie id": 0}) + maint_ref = 1. * obs.time_next_maintenance + load_q_ref = 1. * obs.load_q + load_p_ref = 1. 
* obs.get_forecast_arrays()[0] + + self.env_bug.chronics_handler.reset() + maint_1 = 1. * obs.time_next_maintenance + load_q_1 = 1. * obs.load_q + load_p_1 = 1. * obs.get_forecast_arrays()[0] + + assert (np.abs(maint_ref - maint_1) <= 1e-6).all() + assert (np.abs(load_q_ref - load_q_1) <= 1e-6).all() + assert (np.abs(load_p_ref - load_p_1) <= 1e-6).all() + + +if __name__ == "__main__": + unittest.main() diff --git a/grid2op/tests/test_issue_617.py b/grid2op/tests/test_issue_617.py new file mode 100644 index 000000000..e9072a688 --- /dev/null +++ b/grid2op/tests/test_issue_617.py @@ -0,0 +1,102 @@ +# Copyright (c) 2024, RTE (https://www.rte-france.com) +# See AUTHORS.txt and https://github.com/rte-france/Grid2Op/pull/319 +# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0. +# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file, +# you can obtain one at http://mozilla.org/MPL/2.0/. +# SPDX-License-Identifier: MPL-2.0 +# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems. 
+ +import unittest +import pandapower as pp +import tempfile +import os +from pathlib import Path +import warnings +import copy +import numpy as np + + +from helper_path_test import PATH_DATA_TEST +import grid2op +from grid2op.Backend.pandaPowerBackend import PandaPowerBackend +from grid2op.Action.playableAction import PlayableAction +from grid2op.Observation.completeObservation import CompleteObservation +from grid2op.Reward.flatReward import FlatReward +from grid2op.Rules.DefaultRules import DefaultRules +from grid2op.Chronics.multiFolder import Multifolder +from grid2op.Chronics.gridStateFromFileWithForecasts import GridStateFromFileWithForecasts +from grid2op.Chronics import ChangeNothing + + +class Issue617Tester(unittest.TestCase): + def setUp(self): + self.env_name = "l2rpn_case14_sandbox" + # create first env + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + root_path = Path(os.path.abspath(PATH_DATA_TEST)) + self.env_path = tempfile.TemporaryDirectory(dir=root_path) + self.tol = 1e-6 + + def tearDown(self) -> None: + self.env_path.cleanup() + return super().tearDown() + + def create_config(self, env_path:Path, network, **kwargs): + thermal_limits = [10_000. 
* el for el in network.line.max_i_ka] # Thermal Limit in Amps (A) + with open(Path(env_path.name) / "config.py", "w") as config: + # Import Statements + config.writelines( + [f"from {value.__module__} import {value.__name__}\n" for value in kwargs.values() if hasattr(value, "__module__")] + ) + + # Config Dictionary + config.writelines( + ["config = {\n"] + + [f"'{k}':{getattr(v,'__name__', 'None')},\n" for k,v in kwargs.items()] + + [f"'thermal_limits':{thermal_limits}\n"] + + ["}\n"] + ) + return thermal_limits + + def create_pp_net(self): + network = pp.create_empty_network() + pp.create_buses(network, nr_buses=2, vn_kv=20.0) + pp.create_gen(network, bus=0, p_mw=10.0, min_p_mw=-1e9, max_p_mw=1e9, slack=True, slack_weight=1.0) + pp.create_line(network, from_bus=0, to_bus=1, length_km=10.0, std_type="NAYY 4x50 SE") + pp.create_load(network, bus=1, p_mw=10.0, controllable=False) + pp.to_json(network, Path(self.env_path.name) / "grid.json") + return network + + def test_can_make_env(self): + network = self.create_pp_net() + thermal_limits = self.create_config(self.env_path, + network, + backend=PandaPowerBackend, + action=PlayableAction, + observation_class=CompleteObservation, + reward_class=FlatReward, + gamerules_class=DefaultRules, + chronics_class=Multifolder, + grid_value_class=GridStateFromFileWithForecasts, + voltagecontroler_class=None, + names_chronics_to_grid=None) + + pp.runpp(network, numba=True, lightsim2grid=False, max_iteration=10, distributed_slack=False, init="dc", check_connectivity=False) + assert network.converged + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + env = grid2op.make(self.env_path.name, chronics_class=ChangeNothing) + assert (np.abs(env.get_thermal_limit() - thermal_limits) <= 1e-6).all() + obs = env.reset() + assert (np.abs(obs.p_or - network.res_line["p_from_mw"]) <= self.tol).all() + assert (np.abs(obs.q_or - network.res_line["q_from_mvar"]) <= self.tol).all() + assert (np.abs(obs.a_or - 1000. 
* network.res_line["i_from_ka"]) <= self.tol).all() + obs, reward, done, info = env.step(env.action_space()) + assert (np.abs(obs.p_or - network.res_line["p_from_mw"]) <= self.tol).all() + assert (np.abs(obs.q_or - network.res_line["q_from_mvar"]) <= self.tol).all() + assert (np.abs(obs.a_or - 1000. * network.res_line["i_from_ka"]) <= self.tol).all() + + +if __name__ == "__main__": + unittest.main() diff --git a/grid2op/tests/test_multi_steps_forecasts.py b/grid2op/tests/test_multi_steps_forecasts.py index 2608f3cb0..0dc7ac685 100644 --- a/grid2op/tests/test_multi_steps_forecasts.py +++ b/grid2op/tests/test_multi_steps_forecasts.py @@ -80,7 +80,7 @@ def test_chunk_size(self): def test_max_iter(self): max_iter = 4 - self.env.chronics_handler.set_max_iter(max_iter) + self.env.set_max_iter(max_iter) obs = self.env.reset() self.aux_test_for_consistent(obs) diff --git a/grid2op/tests/test_pickling.py b/grid2op/tests/test_pickling.py index ea262d583..c8114d93e 100644 --- a/grid2op/tests/test_pickling.py +++ b/grid2op/tests/test_pickling.py @@ -20,13 +20,17 @@ ScalerAttrConverter, ) +_NAME_FOR_THIS_TEST = __name__ + "for_mp_test" + with warnings.catch_warnings(): # this needs to be imported in the main module for multiprocessing to work "approximately" warnings.filterwarnings("ignore") - _ = grid2op.make("l2rpn_case14_sandbox", test=True, _add_to_name=__name__+"for_mp_test") - - + _ = grid2op.make("l2rpn_case14_sandbox", + test=True, + _add_to_name=_NAME_FOR_THIS_TEST) + + class TestMultiProc(unittest.TestCase): @staticmethod def f(env_gym): @@ -41,7 +45,9 @@ def test_basic(self): with warnings.catch_warnings(): warnings.filterwarnings("ignore") env = grid2op.make( - "l2rpn_case14_sandbox", test=True, _add_to_name=__name__+"for_mp_test" + "l2rpn_case14_sandbox", + test=True, + _add_to_name=_NAME_FOR_THIS_TEST ) env_gym = GymEnv(env) @@ -71,15 +77,15 @@ def test_basic(self): ["rho", "gen_p", "load_p", "topo_vect", "actual_dispatch"] ) ob_space = ob_space.reencode_space( - 
"actual_dispatch", ScalerAttrConverter(substract=0.0, divide=env.gen_pmax) + "actual_dispatch", ScalerAttrConverter(substract=0.0, divide=1. * type(env).gen_pmax) ) ob_space = ob_space.reencode_space( - "gen_p", ScalerAttrConverter(substract=0.0, divide=env.gen_pmax) + "gen_p", ScalerAttrConverter(substract=0.0, divide=1. * type(env).gen_pmax) ) ob_space = ob_space.reencode_space( "load_p", ScalerAttrConverter( - substract=obs_gym["load_p"], divide=0.5 * obs_gym["load_p"] + substract=1. * obs_gym["load_p"], divide=0.5 * obs_gym["load_p"] ), ) env_gym.observation_space = ob_space @@ -95,4 +101,11 @@ def test_basic(self): if __name__ == "__main__": + with warnings.catch_warnings(): + # this needs to be imported in the main module for multiprocessing to work "approximately" + warnings.filterwarnings("ignore") + _ = grid2op.make("l2rpn_case14_sandbox", + test=True, + _add_to_name=__name__+"for_mp_test") + unittest.main() diff --git a/grid2op/tests/test_resest_options.py b/grid2op/tests/test_resest_options.py new file mode 100644 index 000000000..5a5d6b2b0 --- /dev/null +++ b/grid2op/tests/test_resest_options.py @@ -0,0 +1,291 @@ +# Copyright (c) 2024, RTE (https://www.rte-france.com) +# See AUTHORS.txt +# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0. +# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file, +# you can obtain one at http://mozilla.org/MPL/2.0/. +# SPDX-License-Identifier: MPL-2.0 +# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems. 
+ +import warnings +import grid2op +from grid2op.Exceptions import Grid2OpException +import unittest +import pdb + + +class InitTSOptions(unittest.TestCase): + """test the "init ts" options in env.reset() """ + def setUp(self) -> None: + self.env_name = "l2rpn_case14_sandbox" + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + self.env = grid2op.make(self.env_name, test=True, + _add_to_name=type(self).__name__) + + def test_function_ok(self): + obs = self.env.reset() # normal reset + assert obs.year == 2019 + assert obs.month == 1 + assert obs.day == 6 + assert obs.hour_of_day == 0 + assert obs.minute_of_hour == 0 + + obs = self.env.reset(options={"init ts": 1}) # skip the first step, start at 5 minutes + assert obs.year == 2019 + assert obs.month == 1 + assert obs.day == 6 + assert obs.hour_of_day == 0 + assert obs.minute_of_hour == 5, f"{ obs.minute_of_hour} vs 5" + + obs = self.env.reset(options={"init ts": 2}) # start after 10 minutes, 2 steps + assert obs.year == 2019 + assert obs.month == 1 + assert obs.day == 6 + assert obs.hour_of_day == 0 + assert obs.minute_of_hour == 10, f"{ obs.minute_of_hour} vs 10" + + obs = self.env.reset(options={"init ts": 6}) # start after 6steps (30 minutes) + assert obs.year == 2019 + assert obs.month == 1 + assert obs.day == 6 + assert obs.hour_of_day == 0 + assert obs.minute_of_hour == 30, f"{ obs.minute_of_hour} vs 30" + + obs = self.env.reset(options={"init ts": 12}) # start at the 12th step + assert obs.year == 2019 + assert obs.month == 1 + assert obs.day == 6 + assert obs.hour_of_day == 1, f"{ obs.minute_of_hour} vs 1" + assert obs.minute_of_hour == 0, f"{ obs.minute_of_hour} vs 0" + + obs = self.env.reset(options={"init ts": 12 * 24}) # start after exactly 1 day + assert obs.year == 2019 + assert obs.month == 1 + assert obs.day == 7, f"{ obs.day} vs 7" + assert obs.hour_of_day == 0, f"{ obs.hour_of_day} vs 1" + assert obs.minute_of_hour == 0, f"{ obs.minute_of_hour} vs 0" + + def 
test_soft_overflow(self): + """check that the lines are not on soft overflow (obs.timestep_overflow == 0 just after reset)""" + line_id = 3 + obs = self.env.reset(options={"time serie id": 0}) + th_lim = 1. * self.env.get_thermal_limit() + th_lim[line_id] = 0.6 * obs.a_or[line_id] + self.env.set_thermal_limit(th_lim) + obs = self.env.reset(options={"time serie id": 0}) + assert (obs.timestep_overflow == 0).all() + assert obs.rho[line_id] > 1. + assert obs.line_status[line_id] + + obs = self.env.reset(options={"time serie id": 0}) + assert (obs.timestep_overflow == 0).all() + assert obs.rho[line_id] > 1. + assert obs.line_status[line_id] + + obs = self.env.reset(options={"time serie id": 0, "init ts": 1}) + assert (obs.timestep_overflow == 0).all() + assert obs.rho[line_id] > 1. + assert obs.line_status[line_id] + + obs = self.env.reset(options={"time serie id": 0, "init ts": 2}) + assert (obs.timestep_overflow == 0).all() + assert obs.rho[line_id] > 1. + assert obs.line_status[line_id] + + obs = self.env.reset(options={"time serie id": 0, "init ts": 6}) + assert (obs.timestep_overflow == 0).all() + assert obs.rho[line_id] > 1. + assert obs.line_status[line_id] + + def test_hard_overflow(self): + """check lines are disconnected if on hard overflow at the beginning""" + line_id = 3 + obs = self.env.reset(options={"time serie id": 0}) + th_lim = 1. * self.env.get_thermal_limit() + th_lim[line_id] = 0.4 * obs.a_or[line_id] + self.env.set_thermal_limit(th_lim) + obs = self.env.reset(options={"time serie id": 0}) + assert (obs.timestep_overflow == 0).all() + assert obs.rho[line_id] == 0. + assert not obs.line_status[line_id] + assert obs.time_before_cooldown_line[line_id] == 0 + + obs = self.env.reset(options={"time serie id": 0}) + assert (obs.timestep_overflow == 0).all() + assert obs.rho[line_id] == 0. 
+ assert not obs.line_status[line_id] + assert obs.time_before_cooldown_line[line_id] == 0 + + obs = self.env.reset(options={"time serie id": 0, "init ts": 1}) + assert (obs.timestep_overflow == 0).all() + assert obs.rho[line_id] == 0. + assert not obs.line_status[line_id] + assert obs.time_before_cooldown_line[line_id] == 0 + + obs = self.env.reset(options={"time serie id": 0, "init ts": 2}) + assert (obs.timestep_overflow == 0).all() + assert obs.rho[line_id] == 0. + assert not obs.line_status[line_id] + assert obs.time_before_cooldown_line[line_id] == 0 + + obs = self.env.reset(options={"time serie id": 0, "init ts": 6}) + assert (obs.timestep_overflow == 0).all() + assert obs.rho[line_id] == 0. + assert not obs.line_status[line_id] + assert obs.time_before_cooldown_line[line_id] == 0 + + + def test_raise_if_args_not_correct(self): + with self.assertRaises(Grid2OpException): + # string and not int + obs = self.env.reset(options={"init ts": "treliug"}) + with self.assertRaises(Grid2OpException): + # float which is not an int + obs = self.env.reset(options={"init ts": 1.5}) + with self.assertRaises(Grid2OpException): + # value too small + obs = self.env.reset(options={"init ts": 0}) + + # should work with a float convertible to an int + obs = self.env.reset(options={"time serie id": 0, "init ts": 6.}) + + +class MaxStepOptions(unittest.TestCase): + """test the "max step" options in env.reset() """ + def setUp(self) -> None: + self.env_name = "l2rpn_case14_sandbox" + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + self.env = grid2op.make(self.env_name, test=True, + _add_to_name=type(self).__name__) + + def test_raise_if_args_not_correct(self): + with self.assertRaises(Grid2OpException): + # string and not int + obs = self.env.reset(options={"max step": "treliug"}) + with self.assertRaises(Grid2OpException): + # float which is not an int + obs = self.env.reset(options={"max step": 1.5}) + + with self.assertRaises(Grid2OpException): + # value 
too small + obs = self.env.reset(options={"max step": 0}) + + # should work with a float convertible to an int + obs = self.env.reset(options={"time serie id": 0, "max step": 6.}) + + def test_function_ok(self): + obs = self.env.reset() # normal reset + assert obs.max_step == 575, f"{obs.max_step} vs 575" + + # enough data to be limited + obs = self.env.reset(options={"max step": 5}) + assert obs.max_step == 5, f"{obs.max_step} vs 5" + + # limit has no effect: not enough data anyway + obs = self.env.reset(options={"max step": 800}) + assert obs.max_step == 575, f"{obs.max_step} vs 575" + + def test_no_impact_next_reset(self): + obs = self.env.reset() # normal reset + assert obs.max_step == 575, f"{obs.max_step} vs 575" + + # enough data to be limited + obs = self.env.reset(options={"max step": 5}) + assert obs.max_step == 5, f"{obs.max_step} vs 5" + + obs = self.env.reset() # normal reset + assert obs.max_step == 575, f"{obs.max_step} vs 575" + + def test_remember_previous_max_iter(self): + obs = self.env.reset() # normal reset + assert obs.max_step == 575, f"{obs.max_step} vs 575" + + self.env.set_max_iter(200) + obs = self.env.reset() # normal reset + assert obs.max_step == 200, f"{obs.max_step} vs 200" + + # use the option to limit + obs = self.env.reset(options={"max step": 5}) + assert obs.max_step == 5, f"{obs.max_step} vs 5" + + # check it remembers the previous limit + obs = self.env.reset() # normal reset (but 200 were set) + assert obs.max_step == 200, f"{obs.max_step} vs 200" + + # set back the limit to "maximum in the time serie" + self.env.set_max_iter(-1) + obs = self.env.reset() # normal reset + assert obs.max_step == 575, f"{obs.max_step} vs 575" + + # limit for this reset only + obs = self.env.reset(options={"max step": 5}) + assert obs.max_step == 5, f"{obs.max_step} vs 5" + + # check again the right limit was applied + obs = self.env.reset() # normal reset (but 575 were set back) + assert obs.max_step == 575, f"{obs.max_step} vs 575" + + def 
test_max_step_and_init_ts(self): + """test that episode duration is properly computed and updated in + the observation when both max step and init ts are set at the same time""" + obs = self.env.reset() # normal reset + assert obs.max_step == 575, f"{obs.max_step} vs 575" + assert obs.year == 2019 + assert obs.month == 1 + assert obs.day == 6 + assert obs.hour_of_day == 0 + assert obs.minute_of_hour == 0 + + obs = self.env.reset(options={"init ts": 12 * 24, "max step": 24}) # start after exactly 1 day for 2 hours + assert obs.year == 2019 + assert obs.month == 1 + assert obs.day == 7, f"{ obs.day} vs 7" + assert obs.hour_of_day == 0, f"{ obs.hour_of_day} vs 1" + assert obs.minute_of_hour == 0, f"{ obs.minute_of_hour} vs 0" + assert obs.max_step == 24, f"{obs.max_step} vs 24" + + obs = self.env.reset(options={"init ts": 12 * 24}) # start after exactly 1 day without any max + assert obs.year == 2019 + assert obs.month == 1 + assert obs.day == 7, f"{ obs.day} vs 7" + assert obs.hour_of_day == 0, f"{ obs.hour_of_day} vs 1" + assert obs.minute_of_hour == 0, f"{ obs.minute_of_hour} vs 0" + assert obs.max_step == 575, f"{obs.max_step} vs 575" + + obs = self.env.reset() # normal reset + assert obs.max_step == 575, f"{obs.max_step} vs 575" + assert obs.year == 2019 + assert obs.month == 1 + assert obs.day == 6 + assert obs.hour_of_day == 0 + assert obs.minute_of_hour == 0 + + obs = self.env.reset(options={"max step": 288}) # don't skip anything, but last only 1 day + assert obs.year == 2019 + assert obs.month == 1 + assert obs.day == 6, f"{ obs.day} vs 6" + assert obs.hour_of_day == 0, f"{ obs.hour_of_day} vs 1" + assert obs.minute_of_hour == 0, f"{ obs.minute_of_hour} vs 0" + assert obs.max_step == 288, f"{obs.max_step} vs 288" + + obs = self.env.reset(options={"init ts": 12 * 24, "max step": 700}) # start after exactly 1 day for too much steps + assert obs.year == 2019 + assert obs.month == 1 + assert obs.day == 7, f"{ obs.day} vs 7" + assert obs.hour_of_day == 0, f"{ 
obs.hour_of_day} vs 1" + assert obs.minute_of_hour == 0, f"{ obs.minute_of_hour} vs 0" + # 288 here because the limit is the time series ! + assert obs.max_step == 287, f"{obs.max_step} vs 287" + + obs = self.env.reset() # normal reset + assert obs.max_step == 575, f"{obs.max_step} vs 575" + assert obs.year == 2019 + assert obs.month == 1 + assert obs.day == 6 + assert obs.hour_of_day == 0 + assert obs.minute_of_hour == 0 + + +if __name__ == "__main__": + unittest.main() diff --git a/grid2op/tests/test_reset_options_runner.py b/grid2op/tests/test_reset_options_runner.py new file mode 100644 index 000000000..94da9ada1 --- /dev/null +++ b/grid2op/tests/test_reset_options_runner.py @@ -0,0 +1,910 @@ +# Copyright (c) 2024, RTE (https://www.rte-france.com) +# See AUTHORS.txt +# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0. +# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file, +# you can obtain one at http://mozilla.org/MPL/2.0/. +# SPDX-License-Identifier: MPL-2.0 +# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems. 
+ + +import warnings +import unittest + +import grid2op +from grid2op.Runner import Runner +from grid2op.tests.helper_path_test import * + + +class TestResetOptionRunner(unittest.TestCase): + def _env_path(self): + return "l2rpn_case14_sandbox" + + def setUp(self) -> None: + self.env_nm = self._env_path() + self.max_iter = 5 + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + self.env = grid2op.make(self.env_nm, + test=True + ) + self.runner = Runner(**self.env.get_params_for_runner()) + + def tearDown(self) -> None: + self.env.close() + self.runner._clean_up() + return super().tearDown() + + def test_run_one_episode_ts_id(self): + with warnings.catch_warnings(): + warnings.filterwarnings("error") # check it does not raise any error + res = self.runner.run_one_episode(reset_options={"time serie id": 1}, + max_iter=self.max_iter, + detailed_output=True + ) + assert res[1]== '0001' + assert res[3] == self.max_iter + assert res[4] == self.max_iter + + with warnings.catch_warnings(): + warnings.filterwarnings("error") # check it does not raise any error + res = self.runner.run_one_episode(reset_options={}, + episode_id=1, + max_iter=self.max_iter, + detailed_output=True + ) + assert res[1]== '0001' + assert res[3] == self.max_iter + assert res[4] == self.max_iter + + # check the correct episode id is used + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + res = self.runner.run_one_episode(reset_options={"time serie id": 1}, + max_iter=self.max_iter, + episode_id=0, + detailed_output=True + ) + assert res[1]== '0000' + assert res[3] == self.max_iter + assert res[4] == self.max_iter + + def test_run_one_episode_warning_raised_ts_id(self): + # check it does raise an error + with self.assertRaises(UserWarning): + with warnings.catch_warnings(): + warnings.filterwarnings("error") + res = self.runner.run_one_episode(reset_options={"time serie id": 1}, + max_iter=self.max_iter, + episode_id=3, + detailed_output=True + ) + + def 
test_run_onesingle_ep_ts_id(self): + # one reset option + res = self.runner.run(nb_episode=1, + reset_options={"time serie id": 1}, + max_iter=self.max_iter + ) + assert res[0][1]== '0001' + assert res[0][3] == self.max_iter + assert res[0][4] == self.max_iter + + # one list (of one element here) + res = self.runner.run(nb_episode=1, + reset_options=[{"time serie id": 1}], + max_iter=self.max_iter + ) + assert res[0][1]== '0001' + assert res[0][3] == self.max_iter + assert res[0][4] == self.max_iter + + # one tuple (of one element here) + res = self.runner.run(nb_episode=1, + reset_options=({"time serie id": 1}, ), + max_iter=self.max_iter + ) + assert res[0][1]== '0001' + assert res[0][3] == self.max_iter + assert res[0][4] == self.max_iter + + # check the correct episode id is used + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + res = self.runner.run(nb_episode=1, + reset_options={"time serie id": 1}, + max_iter=self.max_iter, + episode_id=[0] + ) + assert res[0][1]== '0000' + assert res[0][3] == self.max_iter + assert res[0][4] == self.max_iter + + # check the warning is raised + with self.assertRaises(UserWarning): + with warnings.catch_warnings(): + warnings.filterwarnings("error") + res = self.runner.run(nb_episode=1, + reset_options={"time serie id": 1}, + max_iter=self.max_iter, + episode_id=[0] + ) + + def test_run_two_eps_seq_ts_id(self, nb_process=1): + # one reset option + res = self.runner.run(nb_episode=2, + reset_options={"time serie id": 1}, + max_iter=self.max_iter, + nb_process=nb_process + ) + for el in res: + assert el[1]== '0001' + assert el[3] == self.max_iter + assert el[4] == self.max_iter + + # one list (of one element here) + res = self.runner.run(nb_episode=2, + reset_options=[{"time serie id": 1}, {"time serie id": 1}], + max_iter=self.max_iter, + nb_process=nb_process + ) + for el in res: + assert el[1]== '0001' + assert el[3] == self.max_iter + assert el[4] == self.max_iter + + # one tuple (of one element here) 
+ res = self.runner.run(nb_episode=2, + reset_options=({"time serie id": 1}, {"time serie id": 1}), + max_iter=self.max_iter, + nb_process=nb_process + ) + for el in res: + assert el[1]== '0001' + assert el[3] == self.max_iter + assert el[4] == self.max_iter + + # check the correct episode id is used + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + res = self.runner.run(nb_episode=2, + reset_options={"time serie id": 1}, + max_iter=self.max_iter, + episode_id=[0, 1], + nb_process=nb_process + ) + assert res[0][1]== '0000' + assert res[1][1]== '0001' + for el in res: + assert el[3] == self.max_iter + assert el[4] == self.max_iter + + # check the warning is raised + with self.assertRaises(UserWarning): + with warnings.catch_warnings(): + warnings.filterwarnings("error") + res = self.runner.run(nb_episode=2, + reset_options={"time serie id": 1}, + max_iter=self.max_iter, + episode_id=[0, 1], + nb_process=nb_process + ) + + def test_run_two_eps_seq_two_options_ts_id(self, nb_process=1): + # one list (of one element here) + res = self.runner.run(nb_episode=2, + reset_options=[{"time serie id": 0}, {"time serie id": 1}], + max_iter=self.max_iter, + nb_process=nb_process + ) + assert res[0][1]== '0000' + assert res[1][1]== '0001' + for el in res: + assert el[3] == self.max_iter + assert el[4] == self.max_iter + + # one tuple (of one element here) + res = self.runner.run(nb_episode=2, + reset_options=({"time serie id": 0}, {"time serie id": 1}), + max_iter=self.max_iter, + nb_process=nb_process + ) + assert res[0][1]== '0000' + assert res[1][1]== '0001' + for el in res: + assert el[3] == self.max_iter + assert el[4] == self.max_iter + + # check the correct episode id is used + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + res = self.runner.run(nb_episode=2, + reset_options={"time serie id": 1}, + max_iter=self.max_iter, + episode_id=[0, 1], + nb_process=nb_process + ) + assert res[0][1]== '0000' + assert res[1][1]== '0001' + 
for el in res: + assert el[3] == self.max_iter + assert el[4] == self.max_iter + + # check the warning is raised + with self.assertRaises(UserWarning): + with warnings.catch_warnings(): + warnings.filterwarnings("error") + res = self.runner.run(nb_episode=2, + reset_options={"time serie id": 1}, + max_iter=self.max_iter, + episode_id=[0, 1], + nb_process=nb_process + ) + + def test_run_two_eps_par_ts_id(self): + self.test_run_two_eps_seq_ts_id(nb_process=2) + + def test_run_two_eps_par_two_opts_ts_id(self): + self.test_run_two_eps_seq_two_options_ts_id(nb_process=2) + + def test_fail_when_needed(self): + # wrong type + with self.assertRaises(RuntimeError): + res = self.runner.run(nb_episode=2, + reset_options=1, + episode_id=[0, 1], + max_iter=self.max_iter, + add_detailed_output=True, + ) + with self.assertRaises(RuntimeError): + res = self.runner.run(nb_episode=2, + reset_options=[1, {"time serie id": 1}], + episode_id=[0, 1], + max_iter=self.max_iter, + add_detailed_output=True, + ) + with self.assertRaises(RuntimeError): + res = self.runner.run(nb_episode=2, + reset_options=[{"time serie id": 1}, 1], + episode_id=[0, 1], + max_iter=self.max_iter, + add_detailed_output=True, + ) + + # wrong size (too big) + with self.assertRaises(RuntimeError): + res = self.runner.run(nb_episode=2, + reset_options=[{"time serie id": 1}, + {"time serie id": 1}, + {"time serie id": 1}], + episode_id=[0, 1], + max_iter=self.max_iter, + add_detailed_output=True, + ) + # wrong size (too small) + with self.assertRaises(RuntimeError): + res = self.runner.run(nb_episode=2, + reset_options=[{"time serie id": 1}], + episode_id=[0, 1], + max_iter=self.max_iter, + add_detailed_output=True, + ) + # wrong key (beginning) + with self.assertRaises(RuntimeError): + res = self.runner.run(nb_episode=2, + reset_options=[{"bleurk": 1}, {"time serie id": 1}], + episode_id=[0, 1], + max_iter=self.max_iter, + add_detailed_output=True, + ) + # wrong key (end) + with self.assertRaises(RuntimeError): + 
res = self.runner.run(nb_episode=2, + reset_options=[{"time serie id": 1}, {"bleurk": 1}], + episode_id=[0, 1], + max_iter=self.max_iter, + add_detailed_output=True, + ) + # wrong key (when alone) + with self.assertRaises(RuntimeError): + res = self.runner.run(nb_episode=2, + reset_options={"bleurk": 1}, + episode_id=[0, 1], + max_iter=self.max_iter, + add_detailed_output=True, + ) + + def test_run_one_episode_max_it(self): + with warnings.catch_warnings(): + warnings.filterwarnings("error") # check it does not raise any error + res = self.runner.run_one_episode(reset_options={"max step": self.max_iter, "time serie id": 1}, + detailed_output=True + ) + assert res[1]== '0001' + assert res[3] == self.max_iter + assert res[4] == self.max_iter + + with warnings.catch_warnings(): + warnings.filterwarnings("error") # check it does not raise any error + res = self.runner.run_one_episode(reset_options={"time serie id": 1}, + max_iter=self.max_iter, + detailed_output=True + ) + assert res[1]== '0001' + assert res[3] == self.max_iter + assert res[4] == self.max_iter + + # check the correct max iter is used + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + res = self.runner.run_one_episode(reset_options={"time serie id": 1, "max step": self.max_iter + 1}, + max_iter=self.max_iter, + episode_id=0, + detailed_output=True + ) + assert res[1]== '0000' + assert res[3] == self.max_iter + assert res[4] == self.max_iter + + def test_run_one_episode_warning_raised_max_it(self): + # check it does raise an error + with self.assertRaises(UserWarning): + with warnings.catch_warnings(): + warnings.filterwarnings("error") + res = self.runner.run_one_episode(reset_options={"time serie id": 1, "max step": self.max_iter + 3}, + max_iter=self.max_iter + ) + + def test_run_onesingle_ep_max_it(self): + # one reset option + res = self.runner.run(nb_episode=1, + reset_options={"time serie id": 1, "max step": self.max_iter}, + ) + assert res[0][1]== '0001' + assert res[0][3] == 
self.max_iter + assert res[0][4] == self.max_iter + + # one list (of one element here) + res = self.runner.run(nb_episode=1, + reset_options=[{"time serie id": 1, "max step": self.max_iter}], + ) + assert res[0][1]== '0001' + assert res[0][3] == self.max_iter + assert res[0][4] == self.max_iter + + # one tuple (of one element here) + res = self.runner.run(nb_episode=1, + reset_options=({"time serie id": 1, "max step": self.max_iter}, ), + ) + assert res[0][1]== '0001' + assert res[0][3] == self.max_iter + assert res[0][4] == self.max_iter + + # check the correct episode id is used + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + res = self.runner.run(nb_episode=1, + reset_options={"time serie id": 0, "max step": self.max_iter + 3}, + max_iter=self.max_iter, + ) + assert res[0][1]== '0000' + assert res[0][3] == self.max_iter + assert res[0][4] == self.max_iter + + # check the warning is raised + with self.assertRaises(UserWarning): + with warnings.catch_warnings(): + warnings.filterwarnings("error") + res = self.runner.run(nb_episode=1, + reset_options={"time serie id": 0, "max step": self.max_iter + 3}, + max_iter=self.max_iter + ) + + def test_run_two_eps_seq_max_it(self, nb_process=1): + # one reset option + res = self.runner.run(nb_episode=2, + reset_options={"time serie id": 1, "max step": self.max_iter }, + nb_process=nb_process + ) + for el in res: + assert el[1]== '0001' + assert el[3] == self.max_iter + assert el[4] == self.max_iter + + # one list (of the same element here) + res = self.runner.run(nb_episode=2, + reset_options=[{"time serie id": 1, "max step": self.max_iter}, + {"time serie id": 1, "max step": self.max_iter}], + nb_process=nb_process + ) + for el in res: + assert el[1]== '0001' + assert el[3] == self.max_iter + assert el[4] == self.max_iter + + # one tuple (of the same element here) + res = self.runner.run(nb_episode=2, + reset_options=({"time serie id": 1, "max step": self.max_iter}, + {"time serie id": 1, "max 
step": self.max_iter}), + nb_process=nb_process + ) + for el in res: + assert el[1]== '0001' + assert el[3] == self.max_iter + assert el[4] == self.max_iter + + # check the correct "max iter" is used + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + res = self.runner.run(nb_episode=2, + reset_options={"max step": self.max_iter + 3}, + max_iter=self.max_iter, + episode_id=[0, 1], + nb_process=nb_process + ) + assert res[0][1]== '0000' + assert res[1][1]== '0001' + for el in res: + assert el[3] == self.max_iter + assert el[4] == self.max_iter + + # check the warning is raised + with self.assertRaises(UserWarning): + with warnings.catch_warnings(): + warnings.filterwarnings("error") + res = self.runner.run(nb_episode=2, + reset_options={"max step": self.max_iter + 3}, + max_iter=self.max_iter, + episode_id=[0, 1], + nb_process=nb_process + ) + + def test_run_two_eps_seq_two_options_max_it(self, nb_process=1): + # one list (of two different elements here) + res = self.runner.run(nb_episode=2, + reset_options=[{"time serie id": 0, "max step": self.max_iter + 1}, + {"time serie id": 1, "max step": self.max_iter + 2}], + nb_process=nb_process + ) + assert res[0][1]== '0000' + assert res[0][3] == self.max_iter + 1 + assert res[0][4] == self.max_iter + 1 + assert res[1][1]== '0001' + assert res[1][3] == self.max_iter + 2 + assert res[1][4] == self.max_iter + 2 + + # one tuple (of two different elements here) + res = self.runner.run(nb_episode=2, + reset_options=({"time serie id": 0, "max step": self.max_iter + 1}, + {"time serie id": 1, "max step": self.max_iter + 2}), + nb_process=nb_process + ) + assert res[0][1]== '0000' + assert res[0][3] == self.max_iter + 1 + assert res[0][4] == self.max_iter + 1 + assert res[1][1]== '0001' + assert res[1][3] == self.max_iter + 2 + assert res[1][4] == self.max_iter + 2 + + # check the correct max iter is used + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + res = 
self.runner.run(nb_episode=2, + reset_options={"max step": self.max_iter + 1}, + max_iter=self.max_iter, + episode_id=[0, 1], + nb_process=nb_process + ) + assert res[0][1]== '0000' + assert res[1][1]== '0001' + for el in res: + assert el[3] == self.max_iter + assert el[4] == self.max_iter + + # check the warning is raised + with self.assertRaises(UserWarning): + with warnings.catch_warnings(): + warnings.filterwarnings("error") + res = self.runner.run(nb_episode=2, + reset_options={"max step": self.max_iter + 1}, + max_iter=self.max_iter, + episode_id=[0, 1], + nb_process=nb_process + ) + + def test_run_two_eps_par_max_it(self): + self.test_run_two_eps_seq_max_it(nb_process=2) + + def test_run_two_eps_par_two_opts_max_it(self): + self.test_run_two_eps_seq_two_options_max_it(nb_process=2) + + def test_run_one_episode_init_act(self): + with warnings.catch_warnings(): + warnings.filterwarnings("error") # check it does not raise any error + res = self.runner.run_one_episode(reset_options={"max step": self.max_iter, "time serie id": 1, + "init state": {"set_line_status": [(1, -1)], "method": "ignore"}}, + detailed_output=True + ) + assert res[1]== '0001' + assert res[3] == self.max_iter + assert res[4] == self.max_iter + + ep_data = res[-1] + init_obs = ep_data.observations[0] + assert init_obs.topo_vect[init_obs.line_or_pos_topo_vect[1]] == -1 + assert init_obs.topo_vect[init_obs.line_ex_pos_topo_vect[1]] == -1 + assert not init_obs.line_status[1] + + with warnings.catch_warnings(): + warnings.filterwarnings("error") # check it does not raise any error + res = self.runner.run_one_episode(reset_options={"time serie id": 1}, + max_iter=self.max_iter, + init_state={"set_line_status": [(1, -1)], "method": "ignore"}, + detailed_output=True + ) + assert res[1]== '0001' + assert res[3] == self.max_iter + assert res[4] == self.max_iter + + ep_data = res[-1] + init_obs = ep_data.observations[0] + assert init_obs.topo_vect[init_obs.line_or_pos_topo_vect[1]] == -1 + assert 
init_obs.topo_vect[init_obs.line_ex_pos_topo_vect[1]] == -1 + assert not init_obs.line_status[1] + + # check the correct init state is used + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + res = self.runner.run_one_episode(reset_options={"time serie id": 1, + "max step": self.max_iter + 1, + "init state": {"set_line_status": [(0, -1)], "method": "ignore"} + }, + max_iter=self.max_iter, + episode_id=0, + init_state={"set_line_status": [(1, -1)], "method": "ignore"}, + detailed_output=True + ) + assert res[1]== '0000' + assert res[3] == self.max_iter + assert res[4] == self.max_iter + + ep_data = res[-1] + init_obs = ep_data.observations[0] + # line 1 is disco + assert init_obs.topo_vect[init_obs.line_or_pos_topo_vect[1]] == -1 + assert init_obs.topo_vect[init_obs.line_ex_pos_topo_vect[1]] == -1 + assert not init_obs.line_status[1] + # line 0 should not + assert init_obs.topo_vect[init_obs.line_or_pos_topo_vect[0]] == 1 + assert init_obs.topo_vect[init_obs.line_ex_pos_topo_vect[0]] == 1 + assert init_obs.line_status[0] + + def test_run_one_episode_warning_raised_init_act(self): + # check it does raise an error + with self.assertRaises(UserWarning): + with warnings.catch_warnings(): + warnings.filterwarnings("error") + res = self.runner.run_one_episode(reset_options={"time serie id": 1, + "max step": self.max_iter + 3, + "init state": {"set_line_status": [(0, -1)], "method": "ignore"}}, + init_state={"set_line_status": [(1, -1)], "method": "ignore"}, + ) + + def test_run_onesingle_ep_init_act(self): + # one reset option + res = self.runner.run(nb_episode=1, + reset_options={"time serie id": 1, + "max step": self.max_iter, + "init state": {"set_line_status": [(0, -1)], "method": "ignore"} + }, + add_detailed_output=True + ) + assert res[0][1]== '0001' + assert res[0][3] == self.max_iter + assert res[0][4] == self.max_iter + ep_data = res[0][-1] + init_obs = ep_data.observations[0] + assert init_obs.topo_vect[init_obs.line_or_pos_topo_vect[0]] == 
-1 + assert init_obs.topo_vect[init_obs.line_ex_pos_topo_vect[0]] == -1 + assert not init_obs.line_status[0] + + # one list (of one element here) + res = self.runner.run(nb_episode=1, + reset_options=[{"time serie id": 1, + "max step": self.max_iter, + "init state": {"set_line_status": [(0, -1)], "method": "ignore"} + }], + add_detailed_output=True + ) + assert res[0][1]== '0001' + assert res[0][3] == self.max_iter + assert res[0][4] == self.max_iter + ep_data = res[0][-1] + init_obs = ep_data.observations[0] + assert init_obs.topo_vect[init_obs.line_or_pos_topo_vect[0]] == -1 + assert init_obs.topo_vect[init_obs.line_ex_pos_topo_vect[0]] == -1 + assert not init_obs.line_status[0] + + # one tuple (of one element here) + res = self.runner.run(nb_episode=1, + reset_options=({"time serie id": 1, + "max step": self.max_iter, + "init state": {"set_line_status": [(0, -1)], "method": "ignore"} + }, ), + add_detailed_output=True + ) + assert res[0][1]== '0001' + assert res[0][3] == self.max_iter + assert res[0][4] == self.max_iter + ep_data = res[0][-1] + init_obs = ep_data.observations[0] + assert init_obs.topo_vect[init_obs.line_or_pos_topo_vect[0]] == -1 + assert init_obs.topo_vect[init_obs.line_ex_pos_topo_vect[0]] == -1 + assert not init_obs.line_status[0] + + # check the correct init action is used + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + res = self.runner.run(nb_episode=1, + reset_options={"time serie id": 0, + "max step": self.max_iter + 3, + "init state": {"set_line_status": [(0, -1)], "method": "ignore"}}, + max_iter=self.max_iter, + init_states={"set_line_status": [(1, -1)], "method": "ignore"}, + add_detailed_output=True + ) + assert res[0][1]== '0000' + assert res[0][3] == self.max_iter + assert res[0][4] == self.max_iter + ep_data = res[0][-1] + init_obs = ep_data.observations[0] + # line 1 is disco + assert init_obs.topo_vect[init_obs.line_or_pos_topo_vect[1]] == -1 + assert init_obs.topo_vect[init_obs.line_ex_pos_topo_vect[1]] 
== -1 + assert not init_obs.line_status[1] + # line 0 should not + assert init_obs.topo_vect[init_obs.line_or_pos_topo_vect[0]] == 1 + assert init_obs.topo_vect[init_obs.line_ex_pos_topo_vect[0]] == 1 + assert init_obs.line_status[0] + + # check the warning is raised + with self.assertRaises(UserWarning): + with warnings.catch_warnings(): + warnings.filterwarnings("error") + res = self.runner.run(nb_episode=1, + reset_options={"time serie id": 0, + "init state": {"set_line_status": [(0, -1)], "method": "ignore"}}, + max_iter=self.max_iter, + init_states={"set_line_status": [(1, -1)], "method": "ignore"}, + add_detailed_output=True + ) + + def test_run_two_eps_seq_init_act(self, nb_process=1): + # one reset option + res = self.runner.run(nb_episode=2, + reset_options={"time serie id": 1, + "max step": self.max_iter, + "init state": {"set_line_status": [(0, -1)], "method": "ignore"} + }, + nb_process=nb_process, + add_detailed_output=True + ) + for el in res: + assert el[1]== '0001' + assert el[3] == self.max_iter + assert el[4] == self.max_iter + ep_data = el[-1] + init_obs = ep_data.observations[0] + assert init_obs.topo_vect[init_obs.line_or_pos_topo_vect[0]] == -1 + assert init_obs.topo_vect[init_obs.line_ex_pos_topo_vect[0]] == -1 + assert not init_obs.line_status[0] + + # one list (of the same element here) + res = self.runner.run(nb_episode=2, + reset_options=[{"time serie id": 1, + "max step": self.max_iter, + "init state": {"set_line_status": [(0, -1)], "method": "ignore"}}, + {"time serie id": 1, + "max step": self.max_iter, + "init state": {"set_line_status": [(0, -1)], "method": "ignore"}}], + nb_process=nb_process, + add_detailed_output=True + ) + for el in res: + assert el[1]== '0001' + assert el[3] == self.max_iter + assert el[4] == self.max_iter + ep_data = el[-1] + init_obs = ep_data.observations[0] + assert init_obs.topo_vect[init_obs.line_or_pos_topo_vect[0]] == -1 + assert init_obs.topo_vect[init_obs.line_ex_pos_topo_vect[0]] == -1 + assert not 
init_obs.line_status[0] + + # one tuple (of the same element here) + res = self.runner.run(nb_episode=2, + reset_options=({"time serie id": 1, + "max step": self.max_iter, + "init state": {"set_line_status": [(0, -1)], "method": "ignore"}}, + {"time serie id": 1, + "max step": self.max_iter, + "init state": {"set_line_status": [(0, -1)], "method": "ignore"}}), + nb_process=nb_process, + add_detailed_output=True + ) + for el in res: + assert el[1]== '0001' + assert el[3] == self.max_iter + assert el[4] == self.max_iter + ep_data = el[-1] + init_obs = ep_data.observations[0] + assert init_obs.topo_vect[init_obs.line_or_pos_topo_vect[0]] == -1 + assert init_obs.topo_vect[init_obs.line_ex_pos_topo_vect[0]] == -1 + assert not init_obs.line_status[0] + + # check the correct "init state" is used + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + res = self.runner.run(nb_episode=2, + reset_options={"init state": {"set_line_status": [(0, -1)], "method": "ignore"}}, + max_iter=self.max_iter, + episode_id=[0, 1], + nb_process=nb_process, + init_states={"set_line_status": [(1, -1)], "method": "ignore"}, + add_detailed_output=True + ) + assert res[0][1]== '0000' + assert res[1][1]== '0001' + for el in res: + assert el[3] == self.max_iter + assert el[4] == self.max_iter + ep_data = el[-1] + init_obs = ep_data.observations[0] + # line 1 is disco + assert init_obs.topo_vect[init_obs.line_or_pos_topo_vect[1]] == -1 + assert init_obs.topo_vect[init_obs.line_ex_pos_topo_vect[1]] == -1 + assert not init_obs.line_status[1] + # line 0 should not + assert init_obs.topo_vect[init_obs.line_or_pos_topo_vect[0]] == 1 + assert init_obs.topo_vect[init_obs.line_ex_pos_topo_vect[0]] == 1 + assert init_obs.line_status[0] + + # check the warning is raised + with self.assertRaises(UserWarning): + with warnings.catch_warnings(): + warnings.filterwarnings("error") + res = self.runner.run(nb_episode=2, + reset_options={"init state": {"set_line_status": [(0, -1)], "method": 
"ignore"}}, + max_iter=self.max_iter, + episode_id=[0, 1], + nb_process=nb_process, + init_states={"set_line_status": [(1, -1)], "method": "ignore"}, + add_detailed_output=True + ) + + def test_run_two_eps_seq_two_options_init_act(self, nb_process=1): + # one list (of two different elements here) + res = self.runner.run(nb_episode=2, + reset_options=[{"time serie id": 0, + "max step": self.max_iter + 1, + "init state": {"set_line_status": [(0, -1)], "method": "ignore"}}, + {"time serie id": 1, + "max step": self.max_iter + 2, + "init state": {"set_line_status": [(1, -1)], "method": "ignore"}}], + nb_process=nb_process, + add_detailed_output=True + ) + assert res[0][1]== '0000' + assert res[0][3] == self.max_iter + 1 + assert res[0][4] == self.max_iter + 1 + ep_data = res[0][-1] + init_obs = ep_data.observations[0] + # line 0 is disco + assert init_obs.topo_vect[init_obs.line_or_pos_topo_vect[0]] == -1 + assert init_obs.topo_vect[init_obs.line_ex_pos_topo_vect[0]] == -1 + assert not init_obs.line_status[0] + # line 1 should not + assert init_obs.topo_vect[init_obs.line_or_pos_topo_vect[1]] == 1 + assert init_obs.topo_vect[init_obs.line_ex_pos_topo_vect[1]] == 1 + assert init_obs.line_status[1] + + assert res[1][1]== '0001' + assert res[1][3] == self.max_iter + 2 + assert res[1][4] == self.max_iter + 2 + ep_data = res[1][-1] + init_obs = ep_data.observations[0] + # line 1 is disco + assert init_obs.topo_vect[init_obs.line_or_pos_topo_vect[1]] == -1 + assert init_obs.topo_vect[init_obs.line_ex_pos_topo_vect[1]] == -1 + assert not init_obs.line_status[1] + # line 0 should not + assert init_obs.topo_vect[init_obs.line_or_pos_topo_vect[0]] == 1 + assert init_obs.topo_vect[init_obs.line_ex_pos_topo_vect[0]] == 1 + assert init_obs.line_status[0] + + # one tuple (of two different elements here) + res = self.runner.run(nb_episode=2, + reset_options=({"time serie id": 0, + "max step": self.max_iter + 1, + "init state": {"set_line_status": [(0, -1)], "method": "ignore"}}, + 
{"time serie id": 1, + "max step": self.max_iter + 2, + "init state": {"set_line_status": [(1, -1)], "method": "ignore"}}), + nb_process=nb_process, + add_detailed_output=True + ) + assert res[0][1]== '0000' + assert res[0][3] == self.max_iter + 1 + assert res[0][4] == self.max_iter + 1 + ep_data = res[0][-1] + init_obs = ep_data.observations[0] + # line 0 is disco + assert init_obs.topo_vect[init_obs.line_or_pos_topo_vect[0]] == -1 + assert init_obs.topo_vect[init_obs.line_ex_pos_topo_vect[0]] == -1 + assert not init_obs.line_status[0] + # line 1 should not + assert init_obs.topo_vect[init_obs.line_or_pos_topo_vect[1]] == 1 + assert init_obs.topo_vect[init_obs.line_ex_pos_topo_vect[1]] == 1 + assert init_obs.line_status[1] + + assert res[1][1]== '0001' + assert res[1][3] == self.max_iter + 2 + assert res[1][4] == self.max_iter + 2 + ep_data = res[1][-1] + init_obs = ep_data.observations[0] + # line 1 is disco + assert init_obs.topo_vect[init_obs.line_or_pos_topo_vect[1]] == -1 + assert init_obs.topo_vect[init_obs.line_ex_pos_topo_vect[1]] == -1 + assert not init_obs.line_status[1] + # line 0 should not + assert init_obs.topo_vect[init_obs.line_or_pos_topo_vect[0]] == 1 + assert init_obs.topo_vect[init_obs.line_ex_pos_topo_vect[0]] == 1 + assert init_obs.line_status[0] + + # check the correct init state is used + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + res = self.runner.run(nb_episode=2, + reset_options={"init state": {"set_line_status": [(0, -1)], "method": "ignore"}}, + max_iter=self.max_iter, + episode_id=[0, 1], + nb_process=nb_process, + add_detailed_output=True, + init_states={"set_line_status": [(1, -1)], "method": "ignore"} + ) + assert res[0][1]== '0000' + assert res[1][1]== '0001' + for el in res: + assert el[3] == self.max_iter + assert el[4] == self.max_iter + ep_data = el[-1] + init_obs = ep_data.observations[0] + # line 1 is disco + assert init_obs.topo_vect[init_obs.line_or_pos_topo_vect[1]] == -1 + assert 
init_obs.topo_vect[init_obs.line_ex_pos_topo_vect[1]] == -1 + assert not init_obs.line_status[1] + # line 0 should not + assert init_obs.topo_vect[init_obs.line_or_pos_topo_vect[0]] == 1 + assert init_obs.topo_vect[init_obs.line_ex_pos_topo_vect[0]] == 1 + assert init_obs.line_status[0] + + # check the warning is raised + with self.assertRaises(UserWarning): + with warnings.catch_warnings(): + warnings.filterwarnings("error") + res = self.runner.run(nb_episode=2, + reset_options={"init state": {"set_line_status": [(0, -1)], "method": "ignore"}}, + max_iter=self.max_iter, + episode_id=[0, 1], + nb_process=nb_process, + add_detailed_output=True, + init_states={"set_line_status": [(1, -1)], "method": "ignore"} + ) + + def test_run_two_eps_par_init_act(self): + self.test_run_two_eps_seq_init_act(nb_process=2) + + def test_run_two_eps_par_two_opts_init_act(self): + self.test_run_two_eps_seq_two_options_init_act(nb_process=2) + + +if __name__ == "__main__": + unittest.main() diff --git a/grid2op/typing_variables.py b/grid2op/typing_variables.py index 463d9adb7..0d0c03968 100644 --- a/grid2op/typing_variables.py +++ b/grid2op/typing_variables.py @@ -45,6 +45,8 @@ #: type hints for the "options" flag of reset function RESET_OPTIONS_TYPING = Union[Dict[Literal["time serie id"], int], Dict[Literal["init state"], DICT_ACT_TYPING], + Dict[Literal["init ts"], int], + Dict[Literal["max step"], int], None] #: type hints for a "GridObject" when converted to a dictionary @@ -54,3 +56,9 @@ np.ndarray, # eg load_to_subid, gen_pos_topo_vect List[Union[int, str, float, bool]]] ] + +#: n_busbar_per_sub +N_BUSBAR_PER_SUB_TYPING = Union[int, # one for all substation + List[int], # give info for all substations + Dict[str, int] # give information for some substation + ] diff --git a/setup.py b/setup.py index ec6d9f963..db3c36bf2 100644 --- a/setup.py +++ b/setup.py @@ -84,7 +84,6 @@ def my_test_suite(): } pkgs["extras"]["test"] += pkgs["extras"]["optional"] pkgs["extras"]["test"] += 
pkgs["extras"]["plot"] -pkgs["extras"]["test"] += pkgs["extras"]["chronix2grid"] pkgs["extras"]["test"] += pkgs["extras"]["gymnasium"] if sys.version_info.minor <= 7: