diff --git a/docs/source/configtables/clustering.csv b/docs/source/configtables/clustering.csv index 5fd8f67a..34a6efa8 100644 --- a/docs/source/configtables/clustering.csv +++ b/docs/source/configtables/clustering.csv @@ -2,10 +2,10 @@ simplify_network:,,, to_substations,bool,"{true, false}",Implementation curerntly overrides to true. Network is simplified to substation nodes with positive or negative power injection. algorithm,str,{'kmeans'}, -feature,str," {'solar+onwind-time', 'solar+onwind-cap', 'solar-time', 'solar-cap', 'solar+offwind-cap'}",For HAC clustering. Currenntly unused in pypsa-usa. +feature,str," {'solar+onwind-time', 'solar+onwind-cap', 'solar-time', 'solar-cap', 'solar+offwind-cap'}",For HAC clustering. cluster_network:,,, algorithm,str,{'kmeans'}, -feature,str," {'solar+onwind-time', 'solar+onwind-cap', 'solar-time', 'solar-cap', 'solar+offwind-cap'}",For HAC clustering. Currenntly unused in pypsa-usa. -aggregation_zones,str,"{'balancing_area', 'state', 'country'}",Boundaries of GIS shapes that are to be respected in clustering. Retain if you would like to analyze expansion within a given zone. +feature,str," {'solar+onwind-time', 'solar+onwind-cap', 'solar-time', 'solar-cap', 'solar+offwind-cap'}",For HAC clustering. +aggregation_zones,str,"{'balancing_area', 'state'}",Boundaries of GIS shapes that are to be respected in clustering. Retain if you would like to analyze expansion within a given zone. aggregation_strategies:,,, table --> {key},str,"{'mean','max','min',etc}","Specifiy the method of aggregating fields within the generators, buses tables. " diff --git a/docs/source/configtables/lines.csv b/docs/source/configtables/lines.csv index 40bbe297..d9a00040 100644 --- a/docs/source/configtables/lines.csv +++ b/docs/source/configtables/lines.csv @@ -1,13 +1,13 @@ -,Unit,Values,Description -types,--,"Values should specify a `line type in PyPSA `_. Keys should specify the corresponding voltage level (e.g. 220., 300. and 380. 
kV)","Specifies line types to assume for the different voltage levels of the ENTSO-E grid extraction. Should normally handle voltage levels 220, 300, and 380 kV" -s_max_pu,--,"Value in [0.,1.]","Correction factor for line capacities (`s_nom`) to approximate :math:`N-1` security and reserve capacity for reactive power flows" -s_nom_max,MW,"float","Global upper limit for the maximum capacity of each extendable line." -max_extension,MW,"float","Upper limit for the extended capacity of each extendable line." -length_factor,--,float,"Correction factor to account for the fact that buses are *not* connected by lines through air-line distance." -under_construction,--,"One of {'zero': set capacity to zero, 'remove': remove completely, 'keep': keep with full capacity}","Specifies how to handle lines which are currently under construction." -dynamic_line_rating,,, --- activate,bool,"true or false","Whether to take dynamic line rating into account" --- cutout,--,"Should be a folder listed in the configuration ``atlite: cutouts:`` (e.g. 'europe-2013-era5') or reference an existing folder in the directory ``cutouts``. Source module must be ERA5.","Specifies the directory where the relevant weather data ist stored." --- correction_factor,--,"float","Factor to compensate for overestimation of wind speeds in hourly averaged wind data" --- max_voltage_difference,deg,"float","Maximum voltage angle difference in degrees or 'false' to disable" --- max_line_rating,--,"float","Maximum line rating relative to nominal capacity without DLR, e.g. 1.3 or 'false' to disable" +,Unit,Values,Description +types,--,"Values should specify a `line type in PyPSA `_. Keys should specify the corresponding voltage level (e.g. 220., 300. and 380. kV)",Specifies line types to assume for the different voltage levels of the TAMU Network. 
+s_max_pu,--,"Value in [0.,1.]",Correction factor for line capacities (`s_nom`) to approximate :math:`N-1` security and reserve capacity for reactive power flows +s_nom_max,MW,float,Global upper limit for the maximum capacity of each extendable line. +max_extension,MW,float,Upper limit for the extended capacity of each extendable line. +length_factor,--,float,Correction factor to account for the fact that buses are *not* connected by lines through air-line distance. +under_construction,--,"One of {'zero': set capacity to zero, 'remove': remove completely, 'keep': keep with full capacity}",Specifies how to handle lines which are currently under construction. +dynamic_line_rating,,, +#NAME?,bool,true or false,Whether to take dynamic line rating into account +#NAME?,--,Should be a folder listed in the configuration ``atlite: cutouts:`` (e.g. 'europe-2013-era5') or reference an existing folder in the directory ``cutouts``. Source module must be ERA5.,Specifies the directory where the relevant weather data is stored. +#NAME?,--,float,Factor to compensate for overestimation of wind speeds in hourly averaged wind data +#NAME?,deg,float,Maximum voltage angle difference in degrees or 'false' to disable +#NAME?,--,float,"Maximum line rating relative to nominal capacity without DLR, e.g. 1.3 or 'false' to disable" diff --git a/docs/source/configtables/opts.csv b/docs/source/configtables/opts.csv index b468be6e..0812e404 100644 --- a/docs/source/configtables/opts.csv +++ b/docs/source/configtables/opts.csv @@ -1,12 +1,12 @@ -Trigger, Description, Definition, Status -``nH``; i.e. ``2H``-``6H``, Resample the time-resolution by averaging over every ``n`` snapshots, ``prepare_network``: `average_every_nhours() `_ and its `caller `__), In active use -``nSEG``; e.g. 
``4380SEG``, "Apply time series segmentation with `tsam `_ package to ``n`` adjacent snapshots of varying lengths based on capacity factors of varying renewables, hydro inflow and load.", ``prepare_network``: apply_time_segmentation(), In active use -``Co2L``, Add an overall absolute carbon-dioxide emissions limit configured in ``electricity: co2limit``. If a float is appended an overall emission limit relative to the emission level given in ``electricity: co2base`` is added (e.g. ``Co2L0.05`` limits emissisions to 5% of what is given in ``electricity: co2base``), ``prepare_network``: `add_co2limit() `_ and its `caller `__, In active use -``Ep``, Add cost for a carbon-dioxide price configured in ``costs: emission_prices: co2`` to ``marginal_cost`` of generators (other emission types listed in ``network.carriers`` possible as well), ``prepare_network``: `add_emission_prices() `_ and its `caller `__, In active use -``CCL``, Add minimum and maximum levels of generator nominal capacity per carrier for individual countries. These can be specified in the file linked at ``electricity: agg_p_nom_limits`` in the configuration. File defaults to ``data/agg_p_nom_minmax.csv``., ``solve_network``, In active use -``EQ``, "Require each country or node to on average produce a minimal share of its total consumption itself. Example: ``EQ0.5c`` demands each country to produce on average at least 50% of its consumption; ``EQ0.5`` demands each node to produce on average at least 50% of its consumption.", ``solve_network``, In active use -``ATK``, "Require each node to be autarkic. Example: ``ATK`` removes all lines and links. ``ATKc`` removes all cross-border lines and links.", ``prepare_network``, In active use -``BAU``, Add a per-``carrier`` minimal overall capacity; i.e. 
at least ``40GW`` of ``OCGT`` in Europe; configured in ``electricity: BAU_mincapacities``, ``solve_network``: `add_opts_constraints() `__, Untested -``SAFE``, Add a capacity reserve margin of a certain fraction above the peak demand to which renewable generators and storage do *not* contribute. Ignores network., ``solve_network`` `add_opts_constraints() `__, Untested -``carrier+{c|p|m}factor``,"Alter the capital cost (``c``), installable potential (``p``) or marginal costs (``m``) of a carrier by a factor. Example: ``solar+c0.5`` reduces the capital cost of solar to 50\% of original values.", ``prepare_network``, In active use -``CH4L``,"Add an overall absolute gas limit. If configured in ``electricity: gaslimit`` it is given in MWh thermal, if a float is appended, the overall gaslimit is assumed to be given in TWh thermal (e.g. ``CH4L200`` limits gas dispatch to 200 TWh termal)", ``prepare_network``: ``add_gaslimit()``, In active use +Trigger, Description, Definition, Status, +``nH``; i.e. ``2H``-``6H``, Resample the time-resolution by averaging over every ``n`` snapshots, ``prepare_network``: `average_every_nhours() `_ and its `caller `__), In active use, +``nSEG``; e.g. ``4380SEG``," ""Apply time series segmentation with `tsam `_ package to ``n`` adjacent snapshots of varying lengths based on capacity factors of varying renewables"," hydro inflow and load.""", ``prepare_network``: apply_time_segmentation(), In active use +``Co2L``, Add an overall absolute carbon-dioxide emissions limit configured in ``electricity: co2limit``. If a float is appended an overall emission limit relative to the emission level given in ``electricity: co2base`` is added (e.g. 
``Co2L0.05`` limits emissisions to 5% of what is given in ``electricity: co2base``), ``prepare_network``: `add_co2limit() `_ and its `caller `__, In active use, +``Ep``, Add cost for a carbon-dioxide price configured in ``costs: emission_prices: co2`` to ``marginal_cost`` of generators (other emission types listed in ``network.carriers`` possible as well), ``prepare_network``: `add_emission_prices() `_ and its `caller `__, In active use, +``CCL``, Add minimum and maximum levels of generator nominal capacity per carrier for individual regions. These can be specified in the file linked at ``electricity: agg_p_nom_limits`` in the configuration. File defaults to ``data/agg_p_nom_minmax.csv``., ``solve_network``, In active use, +``EQ``," ""Require each region or node to on average produce a minimal share of its total consumption itself. Example: ``EQ0.5c`` demands each region to produce on average at least 50% of its consumption; ``EQ0.5`` demands each node to produce on average at least 50% of its consumption.""", ``solve_network``, In active use, +``BAU``, Add a per-``carrier`` minimal overall capacity; i.e. at least ``40GW`` of ``OCGT`` in a given Interconnect; configured in ``electricity: BAU_mincapacities``, ``solve_network``: `add_opts_constraints() `__, Untested, +``SAFE``, Add a capacity reserve margin (a.k.a Planning Reserve Margin) of a certain fraction above the peak demand to which renewable generators and storage do *not* contribute. Ignores network constraints., ``solve_network`` `add_opts_constraints() `__, Untested, +``carrier+{c|p|m}factor``,"Alter the capital cost (``c``), installable potential (``p``) or marginal costs (``m``) of a carrier by a factor. Example: ``solar+c0.5`` reduces the capital cost of solar to 50\% of original values.", ``prepare_network``, In active use, +``CH4L``,"Add an overall absolute gas limit. 
If configured in ``electricity: gaslimit`` it is given in MWh thermal, if a float is appended, the overall gaslimit is assumed to be given in TWh thermal (e.g. ``CH4L200`` limits gas dispatch to 200 TWh termal)", ``prepare_network``: ``add_gaslimit()``, In active use, +``ATK``," ""Require each node to be autarkic. Example: ``ATK`` removes all lines and links. ``ATKc`` removes all cross-border lines and links.""", ``prepare_network``, In active use, diff --git a/workflow/Snakefile b/workflow/Snakefile index c2ecf247..8a2587c5 100644 --- a/workflow/Snakefile +++ b/workflow/Snakefile @@ -22,7 +22,6 @@ FIGURES_SINGLE = [ "production_bar", "production_area", "emissions_area", - "emissions_accumulated", "emissions_accumulated_tech", "emissions_map", "renewable_potential_map", @@ -38,7 +37,6 @@ FIGURES_VALIDATE = [ FIGURES_SINGLE_HTML = [ "production_area_html", "emissions_area_html", - # "emissions_node_html", "emissions_region_html", "emissions_accumulated_tech_html", ] @@ -66,8 +64,8 @@ wildcard_constraints: # Merge subworkflow configs and main config # configfile: "config/tests/config.validation.yaml" -# configfile: "config/tests/config.test_simple.yaml" -configfile: "config/config.default.yaml" +configfile: "config/tests/config.test_simple.yaml" +# configfile: "config/config.default.yaml" configfile: "config/config.cluster.yaml" configfile: "config/config.osw.yaml" configfile: "config/config.plotting.yaml" diff --git a/workflow/config/config.cluster.yaml b/workflow/config/config.cluster.yaml index c8681777..c3d2fd1c 100644 --- a/workflow/config/config.cluster.yaml +++ b/workflow/config/config.cluster.yaml @@ -5,8 +5,8 @@ __default__: walltime: 00:30:00 # time limit for each job cpus_per_task: 1 # number of cores per job chdir: $GROUP_HOME/kamran/pypsa-usa/workflow - output: logs/slurm/{rule}-%j.out - error: logs/slurm/err_{rule}-%j.err + output: logs/{rule}/log-%j.out + error: logs/{rule}/errlog-%j.err build_renewable_profiles: walltime: 02:00:00 diff --git 
a/workflow/config/config.default.yaml b/workflow/config/config.default.yaml index 5ffc5600..6275eca6 100644 --- a/workflow/config/config.default.yaml +++ b/workflow/config/config.default.yaml @@ -17,7 +17,7 @@ scenario: scope: "total" # "urban", "rural", or "total" sector: "" # G planning_horizons: - - 2030 # # Uncomment to use NREL EFS Demand Forecasts (2030, 2040, 2050) + - 2030 #(2030, 2040, 2050) foresight: # Only Single Stage Currently @@ -73,6 +73,7 @@ electricity: co2base: 226.86e+6 #base_from_2020 Locations of the 250 MMmt of CO2 emissions from the WECC 2021. gaslimit: false # global gas usage limit of X MWh_th retirement: economic # "economic" or "technical" + SAFE_reservemargin: 0.15 operational_reserve: activate: false @@ -90,7 +91,7 @@ electricity: Store: [] #[H2] Link: [] #[H2 pipeline] - demand: #EFS used for given planning_horizons year + demand: #EFS used for given planning_horizons year (only ref/mod implemented) EFS_case: reference # reference, medium, high EFS_speed: moderate # slow, moderate, rapid @@ -259,19 +260,14 @@ clustering: cluster_network: algorithm: kmeans feature: solar+onwind-time - aggregation_zones: 'balancing_area' # [balancing_area, state] + aggregation_zones: 'state' # [balancing_area, state] + exclude_carriers: [] + consider_efficiency_classes: false aggregation_strategies: generators: - p_nom_max: sum # use "min" for more conservative assumptions - p_nom_min: sum - p_min_pu: mean - marginal_cost: mean committable: any - ramp_limit_up: mean - ramp_limit_down: mean - efficiency: mean - buses: - state: max # temp fix. When fixing state aggregation- change add electricity such that region info not is use is removed. 
+ ramp_limit_up: max + ramp_limit_down: max focus_weights: # California: 0.5 diff --git a/workflow/config/tests/config.test.yaml b/workflow/config/tests/config.test.yaml index 62b87545..5b584592 100644 --- a/workflow/config/tests/config.test.yaml +++ b/workflow/config/tests/config.test.yaml @@ -12,13 +12,13 @@ run: # docs : scenario: interconnect: western #"usa|texas|western|eastern" - clusters: [30, 100] - opts: [Co2L1.0-4H, Co2L0.75-4H, Co2L0.5-4H, Co2L0.25-4H, Co2L0.0-4H] - ll: [vopt, v1.15] + clusters: [40] + opts: [Co2L0.30-4H-Ep-SAFE] + ll: [v1.05] scope: "total" # "urban", "rural", or "total" sector: "" # G planning_horizons: - - 2030 # # Uncomment to use NREL EFS Demand Forecasts (2030, 2040, 2050) + - 2030 #(2030, 2040, 2050) foresight: # Only Single Stage Currently @@ -76,9 +76,10 @@ electricity: co2base: 226.86e+6 #base_from_2020 Locations of the 250 MMmt of CO2 emissions from the WECC 2021. gaslimit: false # global gas usage limit of X MWh_th retirement: economic # "economic" or "technical" + SAFE_reservemargin: 0.15 operational_reserve: - activate: true + activate: false epsilon_load: 0.02 epsilon_vres: 0.02 contingency: 4000 @@ -242,7 +243,7 @@ costs: # based on the potentials, assuming (0.1 kW/m2 and 10 m2/person) battery: 0. battery inverter: 0. emission_prices: # in currency per tonne emission, only used with the option Ep - co2: 0. + co2: 20.0 # docs : sector: @@ -263,19 +264,14 @@ clustering: cluster_network: algorithm: kmeans feature: solar+onwind-time - aggregation_zones: 'balancing_area' # [balancing_area, state] + aggregation_zones: 'state' # [balancing_area, state] + exclude_carriers: [] + consider_efficiency_classes: false aggregation_strategies: generators: - p_nom_max: sum # use "min" for more conservative assumptions - p_nom_min: sum - p_min_pu: mean - marginal_cost: mean committable: any - ramp_limit_up: mean - ramp_limit_down: mean - efficiency: mean - buses: - state: max # temp fix. 
When fixing state aggregation- change add electricity such that region info not is use is removed. + ramp_limit_up: max + ramp_limit_down: max focus_weights: # California: 0.5 diff --git a/workflow/config/tests/config.test_simple.yaml b/workflow/config/tests/config.test_simple.yaml index 1e9e7af8..1415ac23 100644 --- a/workflow/config/tests/config.test_simple.yaml +++ b/workflow/config/tests/config.test_simple.yaml @@ -12,12 +12,12 @@ run: scenario: interconnect: [western] #"usa|texas|western|eastern" clusters: [40] - opts: [Co2L0.1, Co2L0.2] + opts: [Co2L0.30-3H-Ep-SAFE] ll: [v1.0] scope: "total" # "urban", "rural", or "total" sector: "" # G planning_horizons: - - 2030 # # Uncomment to use NREL EFS Demand Forecasts (2030, 2040, 2050) + - 2030 #(2030, 2040, 2050) foresight: # Only Single Stage Currently @@ -29,7 +29,7 @@ countries: [US] snapshots: start: "2019-01-01" - end: "2020-01-01" + end: "2019-02-01" inclusive: 'left' # docs : @@ -72,6 +72,7 @@ electricity: co2base: 225.0e+6 #base_from_2020 Locations of the 250 MMmt of CO2 emissions from the WECC 2021. gaslimit: false # global gas usage limit of X MWh_th retirement: economic # "economic" or "technical" + SAFE_reservemargin: 0.15 operational_reserve: activate: false @@ -254,24 +255,19 @@ sector: clustering: simplify_network: to_substations: false # network is simplified to nodes with positive or negative power injection (i.e. substations or offwind connections) - algorithm: kmeans # choose from: [hac, kmeans] + algorithm: hac # choose from: [hac, kmeans] feature: solar+onwind-time # only for hac. choose from: [solar+onwind-time, solar+onwind-cap, solar-time, solar-cap, solar+offwind-cap] etc. 
cluster_network: - algorithm: kmeans + algorithm: hac feature: solar+onwind-time - aggregation_zones: 'balancing_area' # [balancing_area, state] + aggregation_zones: 'state' # [balancing_area, state] + exclude_carriers: [] + consider_efficiency_classes: false aggregation_strategies: generators: - p_nom_max: sum # use "min" for more conservative assumptions - p_nom_min: sum - p_min_pu: mean - marginal_cost: mean committable: any - ramp_limit_up: mean - ramp_limit_down: mean - efficiency: mean - buses: - state: max # temp fix. When fixing state aggregation- change add electricity such that region info not is use is removed. + ramp_limit_up: max + ramp_limit_down: max focus_weights: # California: 0.5 diff --git a/workflow/config/tests/config.validation.yaml b/workflow/config/tests/config.validation.yaml index e3a55070..23883785 100644 --- a/workflow/config/tests/config.validation.yaml +++ b/workflow/config/tests/config.validation.yaml @@ -17,7 +17,7 @@ scenario: scope: "total" # "urban", "rural", or "total" sector: "" # G planning_horizons: - # - 2030 # # Uncomment to use NREL EFS Demand Forecasts (2030, 2040, 2050) + # - 2030 #(2030, 2040, 2050) foresight: # Only Single Stage Currently @@ -260,19 +260,14 @@ clustering: cluster_network: algorithm: kmeans feature: solar+onwind-time - aggregation_zones: 'balancing_area' # balancing_area, country, or state. # Currently issue in State aggregation + aggregation_zones: 'balancing_area' # [balancing_area, state] + exclude_carriers: [] + consider_efficiency_classes: false aggregation_strategies: generators: - p_nom_max: sum # use "min" for more conservative assumptions - p_nom_min: sum - p_min_pu: mean - marginal_cost: mean committable: any - ramp_limit_up: mean - ramp_limit_down: mean - efficiency: mean - buses: - state: max # temp fix. When fixing state aggregation- change add electricity such that region info not is use is removed. 
+ ramp_limit_up: max + ramp_limit_down: max focus_weights: # California: 0.5 diff --git a/workflow/envs/environment.yaml b/workflow/envs/environment.yaml index 4f2af980..a25a80e0 100644 --- a/workflow/envs/environment.yaml +++ b/workflow/envs/environment.yaml @@ -4,17 +4,19 @@ channels: - bioconda dependencies: -- python==3.11.6 -- pip==23.3 -- pypsa==0.25.1 -- linopy==0.2.6 +- python>=3.8 +- pip + +- pypsa>=0.26.1 +- atlite>=0.2.9 +- linopy -- atlite==0.2.11 - dask==2023.7.0 - dask-core==2023.7.0 # Dependencies of the workflow itself -- pandas>=0.24.0,<2.1 +- pandas>=2.1 +- xarray>=2023.11.0 - xlrd==2.0.1 - openpyxl==3.1.2 - pycountry==22.3.5 @@ -25,17 +27,15 @@ dependencies: - pytables==3.9.1 - lxml==4.9.3 - numpy==1.26.0 -- xarray==2023.6.0 - netcdf4==1.6.4 - networkx==3.1 - scipy==1.11.3 - shapely==2.0.2 - progressbar2==4.3.2 -- pyomo==6.6.2 - matplotlib==3.8.0 - plotly==5.17.0 -- powerplantmatching==0.5.7 - +- graphviz +- powerplantmatching # Keep in conda environment when calling ipython - ipython==8.16.1 @@ -44,14 +44,14 @@ dependencies: - ipykernel==6.25.2 # GIS dependencies: +- geopy==2.4.0 - cartopy==0.22.0 - descartes==1.1.0 - rasterio==1.3.8 -- geopandas==0.14.0 +- geopandas>=0.11.0 - geopandas-base==0.14.0 # TODO: check these dependencies -- geopy==2.4.0 - tqdm==4.66.1 - pytz==2023.3.post1 - country_converter==1.0.0 @@ -62,3 +62,4 @@ dependencies: - vresutils==0.3.1 - tsam>=1.1.0 - gurobipy==10.0.3 + - highspy diff --git a/workflow/repo_data/agg_p_nom_minmax.csv b/workflow/repo_data/agg_p_nom_minmax.csv new file mode 100644 index 00000000..9089f16c --- /dev/null +++ b/workflow/repo_data/agg_p_nom_minmax.csv @@ -0,0 +1,2 @@ +country,carrier,min,max +California,solar,0, \ No newline at end of file diff --git a/workflow/rules/build_electricity.smk b/workflow/rules/build_electricity.smk index f58109f9..a7ce4581 100644 --- a/workflow/rules/build_electricity.smk +++ b/workflow/rules/build_electricity.smk @@ -237,7 +237,7 @@ rule build_demand: BENCHMARKS + 
"{interconnect}/build_demand" threads: 1 resources: - mem_mb=10000, + mem_mb=12000, script: "../scripts/build_demand.py" @@ -318,13 +318,21 @@ rule simplify_network: threads: 2 resources: mem_mb=10000, - group: - "agg_network" script: "../scripts/simplify_network.py" rule cluster_network: + params: + cluster_network=config["clustering"]["cluster_network"], + conventional_carriers=config["electricity"].get("conventional_carriers", []), + renewable_carriers=config["electricity"]["renewable_carriers"], + aggregation_strategies=config["clustering"].get("aggregation_strategies", {}), + custom_busmap=config["enable"].get("custom_busmap", False), + focus_weights=config.get("focus_weights", None), + max_hours=config["electricity"]["max_hours"], + length_factor=config["lines"]["length_factor"], + costs=config["costs"], input: network=RESOURCES + "{interconnect}/elec_s.nc", regions_onshore=RESOURCES + "{interconnect}/regions_onshore.geojson", @@ -351,10 +359,8 @@ rule cluster_network: threads: 1 resources: mem_mb=10000, - group: - "agg_network" script: - "../scripts/cluster_network_eur.py" + "../scripts/subworkflows/pypsa-eur/scripts/cluster_network.py" rule add_extra_components: @@ -371,7 +377,7 @@ rule add_extra_components: resources: mem_mb=4000, group: - "agg_network" + "prepare" script: "../scripts/add_extra_components.py" @@ -396,7 +402,7 @@ rule prepare_network: resources: mem_mb=4000, group: - "agg_network" + "prepare" log: "logs/prepare_network", script: diff --git a/workflow/rules/build_sector.smk b/workflow/rules/build_sector.smk index beb7b5f0..b73d843e 100644 --- a/workflow/rules/build_sector.smk +++ b/workflow/rules/build_sector.smk @@ -31,6 +31,11 @@ rule add_sectors: output: network=RESOURCES + "{interconnect}/elec_s_{clusters}_ec_l{ll}_{opts}_{sector}.nc", + group: + "prepare" + threads: 1 + resources: + mem_mb=4000, script: "../scripts/add_sectors.py" diff --git a/workflow/run_slurm.sh b/workflow/run_slurm.sh index 75d71459..4b9d6b1d 100644 --- 
a/workflow/run_slurm.sh +++ b/workflow/run_slurm.sh @@ -1,2 +1,2 @@ # SLURM specifications made in default.cluster.yaml & the individual rules -snakemake --cluster "sbatch -A {cluster.account} --mail-type ALL --mail-user {cluster.email} -p {cluster.partition} -t {cluster.walltime} -o {cluster.output} -e {cluster.error} -c {threads} --mem {resources.mem_mb}" --cluster-config config/config.cluster.yaml --jobs 10 --latency-wait 10 +snakemake --cluster "sbatch -A {cluster.account} --mail-type ALL --mail-user {cluster.email} -p {cluster.partition} -t {cluster.walltime} -o {cluster.output} -e {cluster.error} -c {threads} --mem {resources.mem_mb}" --cluster-config config/config.cluster.yaml --jobs 10 --latency-wait 60 diff --git a/workflow/scripts/add_electricity.py b/workflow/scripts/add_electricity.py index ac94e988..1074e5d6 100755 --- a/workflow/scripts/add_electricity.py +++ b/workflow/scripts/add_electricity.py @@ -734,7 +734,11 @@ def match_plant_to_bus(n, plants): return plants_matched -def attach_renewable_capacities_to_atlite(n, plants_df, renewable_carriers): +def attach_renewable_capacities_to_atlite( + n: pypsa.Network, + plants_df: pd.DataFrame, + renewable_carriers: list, +): plants = plants_df.query( "bus_assignment in @n.buses.index", ) @@ -931,14 +935,14 @@ def attach_wind_and_solar( p_nom_max_bus = ( ds["p_nom_max"] .to_dataframe() - .merge(bus2sub, left_on="bus", right_on="sub_id") + .merge(bus2sub[["bus_id", "sub_id"]], left_on="bus", right_on="sub_id") .set_index("bus_id") .p_nom_max ) weight_bus = ( ds["weight"] .to_dataframe() - .merge(bus2sub, left_on="bus", right_on="sub_id") + .merge(bus2sub[["bus_id", "sub_id"]], left_on="bus", right_on="sub_id") .set_index("bus_id") .weight ) @@ -946,12 +950,15 @@ def attach_wind_and_solar( ds["profile"] .transpose("time", "bus") .to_pandas() - .T.merge(bus2sub, left_on="bus", right_on="sub_id") + .T.merge( + bus2sub[["bus_id", "sub_id"]], + left_on="bus", + right_on="sub_id", + ) .set_index("bus_id") 
.drop(columns="sub_id") .T ) - if supcar == "offwind": capital_cost = capital_cost.to_frame().reset_index() capital_cost.bus = capital_cost.bus.astype(int) diff --git a/workflow/scripts/build_base_network.py b/workflow/scripts/build_base_network.py index 97bf24b2..14da2b5e 100644 --- a/workflow/scripts/build_base_network.py +++ b/workflow/scripts/build_base_network.py @@ -98,7 +98,7 @@ def add_buses_from_file( interconnect=buses.interconnect, x=buses.lon, y=buses.lat, - sub_id=buses.sub_id, + sub_id=buses.sub_id.astype(int), substation_off=False, poi=False, LAF_states=buses.LAF_states, @@ -317,7 +317,7 @@ def add_offshore_buses(n: pypsa.Network, offshore_buses: pd.DataFrame) -> pypsa. interconnect="Offshore", x=offshore_buses.lon, y=offshore_buses.lat, - sub_id=offshore_buses.sub_id.astype(str), + sub_id=offshore_buses.sub_id.astype(int), substation_off=True, poi_sub=False, poi_bus=False, diff --git a/workflow/scripts/build_demand.py b/workflow/scripts/build_demand.py index 6879b0d4..b3f5a0c1 100644 --- a/workflow/scripts/build_demand.py +++ b/workflow/scripts/build_demand.py @@ -153,7 +153,7 @@ def prepare_efs_demand( year=planning_horizons[0], month=1, day=1, - ) + pd.to_timedelta(demand["LocalHourID"] - 1, unit="H") + ) + pd.to_timedelta(demand["LocalHourID"] - 1, unit="h") demand["UTC_Time"] = demand.groupby(["State"])["DateTime"].transform(local_to_utc) demand.drop(columns=["LocalHourID", "DateTime"], inplace=True) demand.set_index("UTC_Time", inplace=True) @@ -180,12 +180,16 @@ def prepare_efs_demand( .apply( lambda group: group.loc[ group.drop(columns="UTC_Time").first_valid_index() - ] + ], ) .drop(columns="UTC_Time") ) + # take the intersection of the demand and the snapshots by hour of year + hoy = (n.snapshots.dayofyear - 1) * 24 + n.snapshots.hour + demand_new = demand_new.loc[hoy] demand_new.index = n.snapshots + n.buses.rename(columns={"LAF_states": "LAF"}, inplace=True) return disaggregate_demand_to_buses(n, demand_new) @@ -259,7 +263,7 @@ def 
main(snakemake): ) else: raise ValueError( - "Invalid demand_type. Supported values are 'ads', and 'pypsa-usa'." + "Invalid demand_type. Supported values are 'ads', and 'pypsa-usa'.", ) demand_per_bus.to_csv(snakemake.output.demand, index=True) diff --git a/workflow/scripts/cluster_network_eur.py b/workflow/scripts/cluster_network_eur.py index 52d09d3b..f30fa0b8 100644 --- a/workflow/scripts/cluster_network_eur.py +++ b/workflow/scripts/cluster_network_eur.py @@ -1,12 +1,16 @@ -# SPDX-FileCopyrightText: : 2017-2022 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT + # coding: utf-8 + +# ADAPTED FROM PyPSA-Eur for PyPSA-USA """ Creates networks clustered to ``{cluster}`` number of zones with aggregated buses, generators and transmission corridors. -**Relevant Settings** +Relevant Settings +----------------- .. code:: yaml @@ -24,10 +28,11 @@ length_factor: .. seealso:: - Documentation of the configuration file ``config.yaml`` at - :ref:`renewable_cf`, :ref:`solving_cf`, :ref:`lines_cf` + Documentation of the configuration file ``config/config.yaml`` at + :ref:`toplevel_cf`, :ref:`renewable_cf`, :ref:`solving_cf`, :ref:`lines_cf` -**Inputs** +Inputs +------ - ``resources/regions_onshore_elec_s{simpl}.geojson``: confer :ref:`simplify` - ``resources/regions_offshore_elec_s{simpl}.geojson``: confer :ref:`simplify` @@ -35,26 +40,28 @@ - ``networks/elec_s{simpl}.nc``: confer :ref:`simplify` - ``data/custom_busmap_elec_s{simpl}_{clusters}.csv``: optional input -**Outputs** +Outputs +------- - ``resources/regions_onshore_elec_s{simpl}_{clusters}.geojson``: - # .. image:: ../img/regions_onshore_elec_s_X.png - # :scale: 33 % + .. image:: img/regions_onshore_elec_s_X.png + :scale: 33 % - ``resources/regions_offshore_elec_s{simpl}_{clusters}.geojson``: - # .. image:: ../img/regions_offshore_elec_s_X.png - # :scale: 33 % + .. 
image:: img/regions_offshore_elec_s_X.png + :scale: 33 % - ``resources/busmap_elec_s{simpl}_{clusters}.csv``: Mapping of buses from ``networks/elec_s{simpl}.nc`` to ``networks/elec_s{simpl}_{clusters}.nc``; - ``resources/linemap_elec_s{simpl}_{clusters}.csv``: Mapping of lines from ``networks/elec_s{simpl}.nc`` to ``networks/elec_s{simpl}_{clusters}.nc``; - ``networks/elec_s{simpl}_{clusters}.nc``: - # .. image:: ../img/elec_s_X.png - # :scale: 40 % + .. image:: img/elec_s_X.png + :scale: 40 % -**Description** +Description +----------- .. note:: @@ -83,15 +90,38 @@ **Is it possible to run the model without the** ``simplify_network`` **rule?** No, the network clustering methods in the PyPSA module - `pypsa.networkclustering `_ + `pypsa.clustering.spatial `_ do not work reliably with multiple voltage levels and transformers. .. tip:: - The rule :mod:`cluster_all_networks` runs + The rule :mod:`cluster_networks` runs for all ``scenario`` s in the configuration file the rule :mod:`cluster_network`. -""" +Exemplary unsolved network clustered to 512 nodes: + +.. image:: img/elec_s_512.png + :scale: 40 % + :align: center + +Exemplary unsolved network clustered to 256 nodes: + +.. image:: img/elec_s_256.png + :scale: 40 % + :align: center + +Exemplary unsolved network clustered to 128 nodes: + +.. image:: img/elec_s_128.png + :scale: 40 % + :align: center + +Exemplary unsolved network clustered to 37 nodes: + +.. 
image:: img/elec_s_37.png + :scale: 40 % + :align: center +""" import logging import warnings @@ -104,21 +134,16 @@ import pyomo.environ as po import pypsa import seaborn as sns -from _helpers import configure_logging -from _helpers import export_network_for_gis_mapping -from _helpers import get_aggregation_strategies -from _helpers import update_p_nom_max -from pypsa.clustering.spatial import busmap_by_greedy_modularity -from pypsa.clustering.spatial import busmap_by_hac -from pypsa.clustering.spatial import busmap_by_kmeans -from pypsa.clustering.spatial import get_clustering_from_busmap +from _helpers import configure_logging, update_p_nom_max +from pypsa.clustering.spatial import ( + busmap_by_greedy_modularity, + busmap_by_hac, + busmap_by_kmeans, + get_clustering_from_busmap, +) warnings.filterwarnings(action="ignore", category=UserWarning) -import os, sys - -sys.path.append(os.path.join(os.getcwd(), "subworkflows", "pypsa-eur", "scripts")) - from add_electricity import load_costs idx = pd.IndexSlice @@ -131,13 +156,12 @@ def normed(x): def weighting_for_country(n, x): - # conv_carriers = {'OCGT','CCGT','PHS', 'hydro'} - gen = n.generators.groupby( # .loc[n.generators.carrier.isin(conv_carriers)] + conv_carriers = {"OCGT", "CCGT", "PHS", "hydro"} + gen = n.generators.loc[n.generators.carrier.isin(conv_carriers)].groupby( "bus", - ).p_nom.sum().reindex( - n.buses.index, - fill_value=0.0, - ) + n.storage_units.groupby( # .loc[n.storage_units.carrier.isin(conv_carriers)] + ).p_nom.sum().reindex(n.buses.index, fill_value=0.0) + n.storage_units.loc[ + n.storage_units.carrier.isin(conv_carriers) + ].groupby( "bus", ).p_nom.sum().reindex( n.buses.index, @@ -154,7 +178,6 @@ def weighting_for_country(n, x): def get_feature_for_hac(n, buses_i=None, feature=None): - if buses_i is None: buses_i = n.buses.index @@ -166,7 +189,7 @@ def get_feature_for_hac(n, buses_i=None, feature=None): carriers.remove("offwind") carriers = np.append( carriers, - 
network.generators.carrier.filter(like="offwind").unique(), + n.generators.carrier.filter(like="offwind").unique(), ) if feature.split("-")[1] == "cap": @@ -218,7 +241,6 @@ def distribute_clusters(n, n_clusters, focus_weights=None, solver_name="cbc"): ), f"Number of clusters must be {len(N)} <= n_clusters <= {N.sum()} for this selection of countries." if focus_weights is not None: - total_focus = sum(list(focus_weights.values())) assert ( @@ -226,9 +248,6 @@ def distribute_clusters(n, n_clusters, focus_weights=None, solver_name="cbc"): ), "The sum of focus weights must be less than or equal to 1." for country, weight in focus_weights.items(): - if country == "Offshore": - L[(country, "0")] = weight + weight**2 - L.pipe(normed) L[country] = weight / len(L[country]) remainder = [ @@ -309,14 +328,14 @@ def fix_country_assignment_for_hac(n): neighbor_bus = n.lines.query( "bus0 == @disconnected_bus or bus1 == @disconnected_bus", ).iloc[0][["bus0", "bus1"]] - new_country = list(set(n.buses.loc[neighbor_bus].country) - {country})[ - 0 - ] + new_country = list( + set(n.buses.loc[neighbor_bus].country) - {country}, + )[0] logger.info( f"overwriting country `{country}` of bus `{disconnected_bus}` " f"to new country `{new_country}`, because it is disconnected " - "from its inital inter-country transmission grid.", + "from its initial inter-country transmission grid.", ) n.buses.at[disconnected_bus, "country"] = new_country return n @@ -394,11 +413,6 @@ def clustering_for_n_clusters( extended_link_costs=0, focus_weights=None, ): - - bus_strategies, generator_strategies = get_aggregation_strategies( - aggregation_strategies, - ) - if not isinstance(custom_busmap, pd.Series): busmap = busmap_for_n_clusters( n, @@ -411,6 +425,10 @@ def clustering_for_n_clusters( else: busmap = custom_busmap + line_strategies = aggregation_strategies.get("lines", dict()) + generator_strategies = aggregation_strategies.get("generators", dict()) + one_port_strategies = 
aggregation_strategies.get("one_ports", dict()) + clustering = get_clustering_from_busmap( n, busmap, @@ -418,8 +436,9 @@ def clustering_for_n_clusters( aggregate_generators_carriers=aggregate_carriers, aggregate_one_ports=["Load", "StorageUnit"], line_length_factor=line_length_factor, + line_strategies=line_strategies, generator_strategies=generator_strategies, - bus_strategies=bus_strategies, + one_port_strategies=one_port_strategies, scale_link_capital_costs=False, ) @@ -429,7 +448,10 @@ def clustering_for_n_clusters( n.links.eval("underwater_fraction * length").div(nc.links.length).dropna() ) nc.links["capital_cost"] = nc.links["capital_cost"].add( - (nc.links.length - n.links.length).clip(lower=0).mul(extended_link_costs), + (nc.links.length - n.links.length) + .clip(lower=0) + .mul(extended_link_costs) + .dropna(), fill_value=0, ) @@ -437,7 +459,6 @@ def clustering_for_n_clusters( def cluster_regions(busmaps, input=None, output=None): - busmap = reduce(lambda x, y: x.map(y), busmaps[1:], busmaps[0]) for which in ("regions_onshore", "regions_offshore"): @@ -460,41 +481,49 @@ def plot_busmap_for_n_clusters(n, n_clusters, fn=None): if __name__ == "__main__": - print("Running clustering.py directly") if "snakemake" not in globals(): from _helpers import mock_snakemake - snakemake = mock_snakemake( - "cluster_network", - interconnect="western", - clusters="100", - ) + snakemake = mock_snakemake("cluster_network", simpl="", clusters="37") configure_logging(snakemake) - n = pypsa.Network(snakemake.input.network) - focus_weights = snakemake.config.get("focus_weights", None) - - n.buses.drop(columns=["state", "balancing_area", "sub_id"], inplace=True) + params = snakemake.params + solver_name = snakemake.config["solving"]["solver"]["name"] - renewable_carriers = pd.Index( - [ - tech - for tech in n.generators.carrier.unique() - if tech in snakemake.config["renewable"] - ], - ) + n = pypsa.Network(snakemake.input.network) + exclude_carriers = 
params.cluster_network["exclude_carriers"] + aggregate_carriers = set(n.generators.carrier) - set(exclude_carriers) + conventional_carriers = set(params.conventional_carriers) if snakemake.wildcards.clusters.endswith("m"): n_clusters = int(snakemake.wildcards.clusters[:-1]) - aggregate_carriers = snakemake.config["electricity"].get( - "conventional_carriers", - ) + aggregate_carriers = params.conventional_carriers & aggregate_carriers + elif snakemake.wildcards.clusters.endswith("c"): + n_clusters = int(snakemake.wildcards.clusters[:-1]) + aggregate_carriers = aggregate_carriers - conventional_carriers elif snakemake.wildcards.clusters == "all": n_clusters = len(n.buses) - aggregate_carriers = None # All else: n_clusters = int(snakemake.wildcards.clusters) - aggregate_carriers = None # All + + if params.cluster_network.get("consider_efficiency_classes", False): + carriers = [] + for c in aggregate_carriers: + gens = n.generators.query("carrier == @c") + low = gens.efficiency.quantile(0.10) + high = gens.efficiency.quantile(0.90) + if low >= high: + carriers += [c] + else: + labels = ["low", "medium", "high"] + suffix = pd.cut( + gens.efficiency, + bins=[0, low, high, 1], + labels=labels, + ).astype(str) + carriers += [f"{c} {label} efficiency" for label in labels] + n.generators.carrier.update(gens.carrier + " " + suffix + " efficiency") + aggregate_carriers = carriers if n_clusters == len(n.buses): # Fast-path if no clustering is necessary @@ -508,33 +537,16 @@ def plot_busmap_for_n_clusters(n, n_clusters, fn=None): pd.Series(dtype="O"), ) else: - line_length_factor = snakemake.config["lines"]["length_factor"] Nyears = n.snapshot_weightings.objective.sum() / 8760 hvac_overhead_cost = load_costs( snakemake.input.tech_costs, - snakemake.config["costs"], - snakemake.config["electricity"]["max_hours"], + params.costs, + params.max_hours, Nyears, ).at["HVAC overhead", "capital_cost"] - def consense(x): - v = x.iat[0] - assert ( - x == v - ).all() or x.isnull().all(), 
"The `potential` configuration option must agree for all renewable carriers, for now!" - return v - - aggregation_strategies = snakemake.config["clustering"].get( - "aggregation_strategies", - {}, - ) - # translate str entries of aggregation_strategies to pd.Series functions: - aggregation_strategies = { - p: {k: getattr(pd.Series, v) for k, v in aggregation_strategies[p].items()} - for p in aggregation_strategies.keys() - } - custom_busmap = snakemake.config["enable"].get("custom_busmap", False) + custom_busmap = params.custom_busmap if custom_busmap: custom_busmap = pd.read_csv( snakemake.input.custom_busmap, @@ -544,27 +556,27 @@ def consense(x): custom_busmap.index = custom_busmap.index.astype(str) logger.info(f"Imported custom busmap from {snakemake.input.custom_busmap}") - cluster_config = snakemake.config.get("clustering", {}).get( - "cluster_network", - {}, - ) - clustering = clustering_for_n_clusters( n, n_clusters, custom_busmap, aggregate_carriers, - line_length_factor, - aggregation_strategies, - snakemake.config["solving"]["solver"]["name"], - cluster_config.get("algorithm", "hac"), - cluster_config.get("feature", "solar+onwind-time"), + params.length_factor, + params.aggregation_strategies, + solver_name, + params.cluster_network["algorithm"], + params.cluster_network["feature"], hvac_overhead_cost, - focus_weights, + params.focus_weights, ) update_p_nom_max(clustering.network) + if params.cluster_network.get("consider_efficiency_classes"): + labels = [f" {label} efficiency" for label in ["low", "medium", "high"]] + nc = clustering.network + nc.generators["carrier"] = nc.generators.carrier.replace(labels, "", regex=True) + clustering.network.meta = dict( snakemake.config, **dict(wildcards=dict(snakemake.wildcards)), diff --git a/workflow/scripts/plot_figures.py b/workflow/scripts/plot_figures.py index 7ee2b0d2..6a196019 100644 --- a/workflow/scripts/plot_figures.py +++ b/workflow/scripts/plot_figures.py @@ -73,7 +73,6 @@ get_capacity_brownfield, 
get_capacity_base, get_demand_base, - get_operational_costs, get_capital_costs, ) from add_electricity import ( @@ -117,7 +116,7 @@ def get_color_palette(n: pypsa.Network) -> pd.Series: "co2": "k", } - return pd.concat([colors, pd.Series(additional)]) + return pd.concat([colors, pd.Series(additional)]).to_dict() def get_bus_scale(interconnect: str) -> float: @@ -274,12 +273,10 @@ def plot_region_emissions_html(n: pypsa.Network, save: str, **wildcards) -> None """ # get data - emissions = get_node_emissions_timeseries(n).mul(1e-6) # T -> MT - emissions = emissions.groupby(n.buses.country, axis=1).sum() + emissions = emissions.T.groupby(n.buses.country).sum().T # plot data - fig = px.area( emissions, x=emissions.index, @@ -323,37 +320,6 @@ def plot_node_emissions_html(n: pypsa.Network, save: str, **wildcards) -> None: fig.write_html(save) -def plot_accumulated_emissions(n: pypsa.Network, save: str, **wildcards) -> None: - """ - Plots accumulated emissions. - """ - - # get data - - emissions = get_tech_emissions_timeseries(n).mul(1e-6).sum(axis=1) # T -> MT - emissions = emissions.cumsum().to_frame("co2") - - # plot - - color_palette = get_color_palette(n) - - fig, ax = plt.subplots(figsize=(14, 4)) - - emissions.plot.area( - ax=ax, - alpha=0.7, - legend="reverse", - color=color_palette.to_dict(), - ) - - ax.legend(bbox_to_anchor=(1, 1), loc="upper left") - ax.set_title(create_title("Accumulated Emissions", **wildcards)) - ax.set_ylabel("Emissions [MT]") - fig.tight_layout() - - fig.savefig(save) - - def plot_accumulated_emissions_tech(n: pypsa.Network, save: str, **wildcards) -> None: """ Plots accumulated emissions by technology. 
@@ -381,7 +347,7 @@ def plot_accumulated_emissions_tech(n: pypsa.Network, save: str, **wildcards) -> ax=ax, alpha=0.7, legend="reverse", - color=color_palette.to_dict(), + color=color_palette, ) ax.legend(bbox_to_anchor=(1, 1), loc="upper left") @@ -421,7 +387,7 @@ def plot_accumulated_emissions_tech_html( emissions, x=emissions.index, y=emissions.columns, - color_discrete_map=color_palette.to_dict(), + color_discrete_map=color_palette, ) title = create_title("Technology Accumulated Emissions", **wildcards) @@ -458,7 +424,7 @@ def plot_hourly_emissions_html(n: pypsa.Network, save: str, **wildcards) -> None emissions, x=emissions.index, y=emissions.columns, - color_discrete_map=color_palette.to_dict(), + color_discrete_map=color_palette, ) title = create_title("Technology Emissions", **wildcards) @@ -488,7 +454,6 @@ def plot_hourly_emissions(n: pypsa.Network, save: str, **wildcards) -> None: emissions = emissions.rename(columns=n.carriers.nice_name) # plot - color_palette = get_color_palette(n) fig, ax = plt.subplots(figsize=(14, 4)) @@ -497,7 +462,7 @@ def plot_hourly_emissions(n: pypsa.Network, save: str, **wildcards) -> None: ax=ax, alpha=0.7, legend="reverse", - color=color_palette.to_dict(), + color=color_palette, ) ax.legend(bbox_to_anchor=(1, 1), loc="upper left") @@ -546,7 +511,7 @@ def plot_production_html( energy_mix, x=energy_mix.index, y=[c for c in energy_mix.columns if c != "Demand"], - color_discrete_map=color_palette.to_dict(), + color_discrete_map=color_palette, ) fig.add_trace( go.Scatter( @@ -589,7 +554,7 @@ def plot_production_area( energy_mix[carrier + "_discharger"] = energy_mix[carrier].clip(lower=0.0001) energy_mix[carrier + "_charger"] = energy_mix[carrier].clip(upper=-0.0001) energy_mix = energy_mix.drop(columns=carrier) - # energy_mix = energy_mix[[x for x in carriers_2_plot if x in energy_mix]] + energy_mix = energy_mix.rename(columns=n.carriers.nice_name) color_palette = get_color_palette(n) @@ -607,7 +572,7 @@ def plot_production_area( 
energy_mix[snapshots].plot.area(
         ax=ax,
         alpha=0.7,
-        color=color_palette.to_dict(),
+        color=color_palette,
     )
 
     demand[snapshots].plot.line(ax=ax, ls="-", color="darkblue")
@@ -635,34 +600,29 @@ def plot_production_bar(
     **wildcards,
 ) -> None:
     """
-    Plot production per carrier.
+    Plot dispatch per carrier.
     """
 
     # get data
-
-    energy_mix = (
-        get_energy_timeseries(n)
-        # .rename(columns={"battery charger": "battery", "battery discharger": "battery"})
-        .groupby(level=0, axis=1)
-        .sum()
-        .sum()
-        .mul(1e-3)  # MW -> GW
-    )
-    energy_mix = pd.DataFrame(energy_mix, columns=["Production"]).reset_index(
-        names="carrier",
-    )
+    energy_mix = n.statistics.dispatch().mul(1e-3)  # MW -> GW
+    energy_mix.name = "dispatch"
     energy_mix = energy_mix[
-        energy_mix.carrier.isin([x for x in carriers_2_plot if x != "battery"])
-    ].copy()
-    energy_mix["color"] = energy_mix.carrier.map(n.carriers.color)
-    energy_mix["carrier"] = energy_mix.carrier.map(n.carriers.nice_name)
-
-    # plot
+        energy_mix.index.get_level_values("component").isin(
+            ["Generator", "StorageUnit"],
+        )
+    ]
+    energy_mix = energy_mix.groupby("carrier").sum().reset_index()
+
     color_palette = get_color_palette(n)
 
     fig, ax = plt.subplots(figsize=(10, 10))
-    sns.barplot(data=energy_mix, y="carrier", x="Production", palette=energy_mix.color)
+    sns.barplot(
+        data=energy_mix,
+        y="carrier",
+        x="dispatch",
+        palette=color_palette,
+    )
 
-    ax.set_title(create_title("Production [GWh]", **wildcards))
+    ax.set_title(create_title("Dispatch [GWh]", **wildcards))
     ax.set_ylabel("")
     # ax.set_xlabel("")
     fig.tight_layout()
@@ -681,22 +641,20 @@ def plot_costs_bar(
 
     # get data
 
-    operational_costs = get_operational_costs(n).sum().mul(1e-9)  # $ -> M$
-    capital_costs = get_capital_costs(n).mul(1e-9)  # $ -> M$
+    opex = n.statistics.opex().mul(1e-6)  # $ -> M$
+    capex = get_capital_costs(n).mul(1e-6)  # $ -> M$
 
     costs = pd.concat(
-        [operational_costs, capital_costs],
+        [opex, capex],
         axis=1,
         keys=["OPEX", "CAPEX"],
     ).reset_index()
-    costs = 
costs[costs.carrier.isin(carriers_2_plot)] - costs["carrier"] = costs.carrier.map(n.carriers.nice_name) costs = costs.groupby("carrier").sum().reset_index() # groups batteries # plot data - fig, ax = plt.subplots(figsize=(10, 10)) - color_palette = n.carriers.reset_index().set_index("nice_name").to_dict()["color"] + color_palette = get_color_palette(n) + sns.barplot( y="carrier", x="CAPEX", @@ -830,7 +788,7 @@ def plot_demand_map( bus_values = get_demand_base(n).mul(1e-3) line_values = n.lines.s_nom - link_values = n.links.p_nom.replace(0) + link_values = n.links.p_nom.replace(to_replace={pd.NA: 0}) # plot data title = create_title("Network Demand", **wildcards) @@ -931,7 +889,7 @@ def plot_base_capacity_map( n=n, bus_values=bus_values, line_values=line_values, - link_values=n.links.p_nom.replace(0), + link_values=n.links.p_nom.replace(to_replace={pd.NA: 0}), regions=regions, line_scale=line_scale, bus_scale=bus_scale, @@ -984,7 +942,7 @@ def plot_opt_capacity_map( n=n, bus_values=bus_values, line_values=line_values, - link_values=n.links.p_nom.replace(0), + link_values=n.links.p_nom.replace(to_replace={pd.NA: 0}), regions=regions, line_scale=line_scale, bus_scale=bus_scale, @@ -1046,7 +1004,7 @@ def plot_new_capacity_map( n=n, bus_values=bus_values, line_values=line_values, - link_values=n.links.p_nom.replace(0), + link_values=n.links.p_nom.replace(to_replace={pd.NA: 0}), regions=regions, line_scale=line_scale, bus_scale=bus_scale, @@ -1181,8 +1139,7 @@ def plot_capacity_additions_bar( # plot data (option 2) # using matplotlib for tech group colours - # color_palette = get_color_palette(n) - color_palette = n.carriers.reset_index().set_index("nice_name").to_dict()["color"] + color_palette = get_color_palette(n) color_mapper = [color_palette[carrier] for carrier in capacity.index] bar_height = 0.35 @@ -1315,7 +1272,12 @@ def plot_capacity_additions_bar( retirement_method, **snakemake.wildcards, ) - plot_costs_bar(n, carriers, snakemake.output["costs_bar"], 
**snakemake.wildcards) + plot_costs_bar( + n, + carriers, + snakemake.output["costs_bar"], + **snakemake.wildcards, + ) plot_production_bar( n, carriers, @@ -1334,15 +1296,14 @@ def plot_capacity_additions_bar( snakemake.output["production_area_html"], **snakemake.wildcards, ) - plot_hourly_emissions(n, snakemake.output["emissions_area"], **snakemake.wildcards) - plot_hourly_emissions_html( + plot_hourly_emissions( n, - snakemake.output["emissions_area_html"], + snakemake.output["emissions_area"], **snakemake.wildcards, ) - plot_accumulated_emissions( + plot_hourly_emissions_html( n, - snakemake.output["emissions_accumulated"], + snakemake.output["emissions_area_html"], **snakemake.wildcards, ) plot_accumulated_emissions_tech( diff --git a/workflow/scripts/simplify_network.py b/workflow/scripts/simplify_network.py index eedbcf48..8add69aa 100644 --- a/workflow/scripts/simplify_network.py +++ b/workflow/scripts/simplify_network.py @@ -86,16 +86,16 @@ def aggregate_to_substations( aggregate_one_ports=["Load", "StorageUnit"], line_length_factor=1.0, bus_strategies={ - "type": np.max, - "Pd": np.sum, + "type": "max", + "Pd": "sum", }, generator_strategies={ - "marginal_cost": np.mean, - "p_nom_min": np.sum, - "p_min_pu": np.mean, - "p_max_pu": np.mean, - "ramp_limit_up": np.max, - "ramp_limit_down": np.max, + "marginal_cost": "mean", + "p_nom_min": "sum", + "p_min_pu": "mean", + "p_max_pu": "mean", + "ramp_limit_up": "max", + "ramp_limit_down": "max", }, ) @@ -129,13 +129,15 @@ def aggregate_to_substations( network_s.buses["x"] = substations.x network_s.buses["y"] = substations.y network_s.buses["substation_lv"] = True - network_s.buses["substation_off"] = True network_s.buses["country"] = ( zone # country field used bc pypsa-eur aggregates based on country boundary ) - network_s.buses["state"] = substations.state - network_s.buses["balancing_area"] = substations.balancing_area network_s.lines["type"] = np.nan + + network_s.buses.drop( + columns=["balancing_area", 
"state", "substation_off", "sub_id"], + inplace=True, + ) return network_s diff --git a/workflow/scripts/summary.py b/workflow/scripts/summary.py index 545a4900..67bd5895 100644 --- a/workflow/scripts/summary.py +++ b/workflow/scripts/summary.py @@ -255,70 +255,8 @@ def _economic_retirement(c: str) -> pd.DataFrame: ### -def get_operational_costs(n: pypsa.Network) -> pd.DataFrame: - - def _get_energy_one_port(c: pypsa.components.Component) -> pd.DataFrame: - return c.pnl.p.abs() - - def _get_energy_multi_port(c: pypsa.components.Component) -> pd.DataFrame: - return c.pnl.p0.abs() - - totals = [] - for c in n.iterate_components(n.one_port_components | n.branch_components): - if c.name in ("Generator", "StorageUnit", "Store"): - production = _get_energy_one_port(c) - elif c.name in ("Link"): - production = _get_energy_multi_port(c) - else: - continue - - marginal_cost = c.pnl.marginal_cost - marginal_cost_static = {} - for item in [x for x in c.df.index if x not in marginal_cost.columns]: - marginal_cost_static[item] = [c.df.at[item, "marginal_cost"]] * len( - marginal_cost, - ) - marginal_cost = pd.concat( - [ - marginal_cost, - pd.DataFrame(marginal_cost_static, index=marginal_cost.index), - ], - axis=1, - ) - - opex = ( - (production * marginal_cost).fillna(0).groupby(c.df.carrier, axis=1).sum() - ) - - totals.append(opex) - - return pd.concat(totals, axis=1) - - def get_capital_costs(n: pypsa.Network) -> pd.DataFrame: - - def _get_new_capacity_MW(c: pypsa.components.Component) -> pd.DataFrame: - return (c.df.p_nom_opt - c.df.p_nom).map(lambda x: x if x > 0 else 0) - - def _get_new_capacity_MWh(c: pypsa.components.Component) -> pd.DataFrame: - return (c.df.e_nom_opt - c.df.e_nom).map(lambda x: x if x > 0 else 0) - - totals = [] - for c in n.iterate_components(n.one_port_components | n.branch_components): - if c.name in ("Generator", "StorageUnit", "Link"): - new_capacity = _get_new_capacity_MW(c) - elif c.name in ("Store"): - new_capacity = _get_new_capacity_MWh(c) 
- else: - continue - - capital_costs = c.df.capital_cost - - capex = (new_capacity * capital_costs).fillna(0).groupby(c.df.carrier).sum() - - totals.append(capex) - - return pd.concat(totals) + return n.statistics.capex() - n.statistics.installed_capex() ### @@ -342,26 +280,36 @@ def get_node_emissions_timeseries(n: pypsa.Network) -> pd.DataFrame: eff_static[gen] = [c.df.at[gen, "efficiency"]] * len(eff) eff = pd.concat([eff, pd.DataFrame(eff_static, index=eff.index)], axis=1) - co2_factor = c.df.carrier.map(n.carriers.co2_emissions).fillna(0) + co2_factor = ( + c.df.carrier.map(n.carriers.co2_emissions) + .fillna(0) + .infer_objects(copy=False) + ) totals.append( ( c.pnl.p.mul(1 / eff) .mul(co2_factor) - .groupby(n.generators.bus, axis=1) + .T.groupby(n.generators.bus) .sum() + .T ), ) elif c.name == "Link": # efficiency taken into account by using p0 - co2_factor = c.df.carrier.map(n.carriers.co2_emissions).fillna(0) + co2_factor = ( + c.df.carrier.map(n.carriers.co2_emissions) + .fillna(0) + .infer_objects(copy=False) + ) totals.append( ( c.pnl.p0.mul(co2_factor) - .groupby(n.links.bus0, axis=1) + .T.groupby(n.links.bus0) .sum() .rename_axis(index={"bus0": "bus"}) + .T ), ) return pd.concat(totals, axis=1) @@ -389,16 +337,21 @@ def get_tech_emissions_timeseries(n: pypsa.Network) -> pd.DataFrame: ( c.pnl.p.mul(1 / eff) .mul(co2_factor) - .groupby(n.generators.carrier, axis=1) + .T.groupby(n.generators.carrier) .sum() + .T ), ) elif c.name == "Link": # efficiency taken into account by using p0 - co2_factor = c.df.carrier.map(n.carriers.co2_emissions).fillna(0) + co2_factor = ( + c.df.carrier.map(n.carriers.co2_emissions) + .fillna(0) + .infer_objects(copy=False) + ) totals.append( - (c.pnl.p0.mul(co2_factor).groupby(n.links.carrier, axis=1).sum()), + (c.pnl.p0.mul(co2_factor).T.groupby(n.links.carrier).sum().T), ) return pd.concat(totals, axis=1) diff --git a/workflow/tests/test_yaml_structure.py b/workflow/tests/test_yaml_structure.py new file mode 100644 index 
00000000..099593d8 --- /dev/null +++ b/workflow/tests/test_yaml_structure.py @@ -0,0 +1,57 @@ +import yaml + + +def load_yaml_file(filepath): + with open(filepath) as file: + return yaml.safe_load(file) + + +def compare_structures(data1, data2, path=""): + if type(data1) != type(data2): + print( + f"Type mismatch at {path}: {type(data1).__name__} vs {type(data2).__name__}", + ) + return False + + if isinstance(data1, dict): + for key in data1: + if key not in data2: + print(f"Missing key in second structure at {path}: {key}") + continue + compare_structures( + data1[key], + data2[key], + path=f"{path}.{key}" if path else key, + ) + for key in data2: + if key not in data1: + print(f"Missing key in first structure at {path}: {key}") + return True + elif isinstance(data1, list): + # For simplicity, just compare the first item if it exists, assuming homogeneous lists + if data1 and data2: + compare_structures(data1[0], data2[0], path=f"{path}[0]") + elif not data1 and data2 or data1 and not data2: + print(f"List length mismatch or one is empty at {path}") + return True + else: + # This part ignores values if they are not container types + return True + + +def test_yaml_structure(filepath1, filepath2): + data1 = load_yaml_file(filepath1) + data2 = load_yaml_file(filepath2) + + print("Comparing structure...") + if compare_structures(data1, data2): + print("The structures match.") + else: + print("The structures do not match.") + + +# Example usage +test_yaml_structure( + "../config/tests/config.test_simple.yaml", + "../config/config.default.yaml", +)