Skip to content

Commit

Permalink
Merge pull request #566 from agoenergy/develop
Browse files Browse the repository at this point in the history
release 2.1
  • Loading branch information
wingechr authored Sep 17, 2024
2 parents ae0024e + 7716072 commit 5f6b7d4
Show file tree
Hide file tree
Showing 27 changed files with 5,341 additions and 5,000 deletions.
2 changes: 1 addition & 1 deletion .bumpversion.cfg
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
[bumpversion]
current_version = 2.0.4
current_version = 2.1.0
commit = True
tag = True

Expand Down
2 changes: 1 addition & 1 deletion .github/workflows/publish.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -23,4 +23,4 @@ jobs:
uses: docker/build-push-action@v5
with:
push: true
tags: wingechr/ptx-boa:2.0.4
tags: wingechr/ptx-boa:2.1.0
3 changes: 3 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -167,3 +167,6 @@ cython_debug/

# cached optimization results:
ptxboa/cache

# tests output:
tests/out
8 changes: 8 additions & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
@@ -1,5 +1,13 @@
# CHANGELOG.md

## Upcoming version

## 2.1 (2024-09-17)

- fix bug regarding snapshot weightings ([#548](https://github.com/agoenergy/ptx-boa/pull/549))
- minor changes to calculation of electricity cost / cost scaling
- changes in optimization module

## 2.0.4 (2024-09-11)

- Disable Green Iron cost calculations for maintenance
Expand Down
2 changes: 1 addition & 1 deletion Dockerfile
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
FROM python:3.10-slim
LABEL version="2.0.4"
LABEL version="2.1.0"

RUN apt-get update
RUN apt-get install -y git
Expand Down
2 changes: 1 addition & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -68,7 +68,7 @@ scp -r ptxboa2:ptx-boa_offline_optimization/optimization_cache/* .
# connect to server
ssh ptxboa
# pull latest image from dockerhub
VERSION=2.0.4
VERSION=2.1.0
docker pull wingechr/ptx-boa:$VERSION
# stop and delete the currently running container "app"
docker stop app
Expand Down
1 change: 1 addition & 0 deletions app/sidebar.py
Original file line number Diff line number Diff line change
Expand Up @@ -95,6 +95,7 @@ def main_settings(api):
"Product:",
[
"Ammonia",
"Green Iron",
"Hydrogen",
"LOHC",
"Methane",
Expand Down
2 changes: 1 addition & 1 deletion app/tab_info.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@

from app.ptxboa_functions import read_markdown_file

__version__ = "2.0.4"
__version__ = "2.1.0"


def content_info():
Expand Down
3 changes: 3 additions & 0 deletions app/tab_input_data.py
Original file line number Diff line number Diff line change
Expand Up @@ -117,6 +117,9 @@ def content_input_data(api: PtxboaAPI) -> None:
(
"The unit of CAPEX and OPEX (fix) is USD/t for Green iron "
"reduction and USD/kW for all other processes."
"\n\n"
"The unit of efficiency is 0.01 t/MWh for Green Iron "
"reduction and % for all other processes."
)
)
display_and_edit_input_data(
Expand Down
4 changes: 3 additions & 1 deletion app/tab_optimization.py
Original file line number Diff line number Diff line change
Expand Up @@ -127,7 +127,9 @@ def calc_aggregate_statistics(
res.at[g, "CAPEX (USD/kW)"] = (
n.links.at[g, "capital_cost"] / n.links.at[g, "efficiency"]
)
res.at[g, "OPEX (USD/kWh)"] = n.links.at[g, "marginal_cost"]
res.at[g, "OPEX (USD/kWh)"] = (
n.links.at[g, "marginal_cost"] / n.links.at[g, "efficiency"]
)

for g in ["EL_STR"]:
if g in n.storage_units.index:
Expand Down
2 changes: 1 addition & 1 deletion flh_opt/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,4 +4,4 @@
# This version string will be used to create cache hashes
# of the optimization runs. When changed, all optimizations
# will have to be re-calculated.
__version__ = "2024-05-14"
__version__ = "2024-09-13"
98 changes: 79 additions & 19 deletions flh_opt/api_opt.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,10 +2,13 @@
"""API interface for FLH optimizer."""
import math
import os
from typing import List, Optional
from typing import List, Literal, Optional

import pandas as pd
from pypsa import Network
from pypsa.descriptors import get_bounds_pu
from pypsa.optimization.common import reindex
from xarray import DataArray

from flh_opt._types import OptInputDataType, OptOutputDataType

Expand Down Expand Up @@ -63,8 +66,8 @@ def _add_link(
# input data is per main output,
# pypsa link parameters are defined per main input
capital_cost=(input_data[name]["CAPEX_A"] + input_data[name]["OPEX_F"])
/ input_data[name]["EFF"],
marginal_cost=input_data[name]["OPEX_O"] / input_data[name]["EFF"],
* input_data[name]["EFF"],
marginal_cost=input_data[name]["OPEX_O"] * input_data[name]["EFF"],
p_nom_extendable=True,
)
# add conversion efficiencies and buses for secondary input / output
Expand All @@ -73,10 +76,75 @@ def _add_link(
# input data is per main output,
# pypsa link parameters are defined per main input
n.links.at[name, f"efficiency{i+2}"] = (
-input_data[name]["CONV"][c] / input_data[name]["EFF"]
-input_data[name]["CONV"][c] * input_data[name]["EFF"]
)


def get_flh(
    n: "Network", g: str, component_type: Literal["Generator", "Link"]
) -> float:
    """Calculate normalized full load hours of a component.

    The weighted annual generation is divided by the optimal nominal
    capacity and by 8760 h, i.e. the result is a capacity factor.

    Parameters
    ----------
    n : Network
        solved pypsa network (``p_nom_opt`` results must be present)
    g : str
        name of the generator / link in the respective component table
    component_type : Literal["Generator", "Link"]
        which component table to look the name up in

    Returns
    -------
    float
        value between 0 and 1 (0 if the component did not run or has
        zero optimal capacity)

    Raises
    ------
    ValueError
        if ``component_type`` is neither "Generator" nor "Link"
    """
    # snapshot weightings convert per-snapshot power into energy;
    # with the characteristic-week scaling a plain .sum() would be wrong
    sw = n.snapshot_weightings["generators"]

    if component_type == "Generator":
        gen = (n.generators_t["p"][g] * sw).sum()
        p_nom = n.generators.at[g, "p_nom_opt"]
    elif component_type == "Link":
        # for links, generation is measured at the input side (p0)
        gen = (n.links_t["p0"][g] * sw).sum()
        p_nom = n.links.at[g, "p_nom_opt"]
    else:
        # previously an unknown type fell through to an UnboundLocalError
        raise ValueError(f"unknown component_type: {component_type!r}")

    # guard against division by zero for unused / zero-capacity components
    if gen == 0 or p_nom == 0:
        return 0

    return gen / p_nom / 8760


def scale_storage_soc_upper_bounds(n: Network) -> None:
    """Scale the upper bounds of storage SOC with snapshot weightings.

    We need to do this because of the week scaling and the fixed correlation
    between charge capacity and state of charge for electricity storage.
    In the storage balance, the effect of charging and discharging on SOC
    is scaled with snapshot weightings.
    This function also scales the storage capacity itself
    with the snapshot weightings.

    Mutates ``n.model`` in place: the default
    ``StorageUnit-ext-state_of_charge-upper`` constraint is replaced by
    one whose SOC bound is multiplied by the "stores" snapshot weighting.

    Parameters
    ----------
    n : Network
        pypsa network; a linopy model is created on it if not present yet
    """
    # if model has not yet been created, do it now:
    if not hasattr(n, "model"):
        n.optimize.create_model()

    # get list of extendable storage units:
    ext_i = n.get_extendable_i("StorageUnit")

    # get max_hours attribute of these storage units:
    # get_bounds_pu returns (lower, upper); [1] selects the upper bound
    max_hours = get_bounds_pu(
        n, "StorageUnit", n.snapshots, index=ext_i, attr="state_of_charge"
    )[1]

    # multiply max_hours with snapshot weightings:
    # NOTE(review): broadcasting per column assumes max_hours is indexed by
    # snapshot (rows) x storage unit (columns) — matches get_bounds_pu output
    scaled_bounds = max_hours.copy()
    for c in max_hours.columns:
        scaled_bounds[c] = max_hours[c] * n.snapshot_weightings["stores"]
    sb = DataArray(scaled_bounds, dims=["snapshot", "StorageUnit-ext"])

    # get state of charge and charge capacity variables:
    # reindex restricts the SOC variable to the extendable units only
    soc = reindex(
        n.model.variables["StorageUnit-state_of_charge"], "StorageUnit", ext_i
    )
    p_nom = n.model.variables["StorageUnit-p_nom"]

    # create left hand side of equation:
    # soc - p_nom * scaled_max_hours <= 0, i.e. soc <= p_nom * scaled bound
    lhs = soc - p_nom * sb

    # remove old constraint:
    n.model.remove_constraints("StorageUnit-ext-state_of_charge-upper")

    # and add the new one:
    n.model.add_constraints(lhs, "<=", 0, name="StorageUnit-ext-state_of_charge-upper")


def optimize(
input_data: OptInputDataType, profiles_path: str = "flh_opt/renewable_profiles"
) -> tuple[OptOutputDataType, Network]:
Expand Down Expand Up @@ -404,29 +472,21 @@ def add_storage(n: Network, input_data: dict, name: str, bus: str) -> None:

n.snapshot_weightings["generators"] = weights
n.snapshot_weightings["objective"] = weights
n.snapshot_weightings["stores"] = 1
n.snapshot_weightings["stores"] = weights

# import profiles to network:
n.import_series_from_dataframe(res_profiles, "Generator", "p_max_pu")

# scale storage SOC constraints:
scale_storage_soc_upper_bounds(n)

# solve optimization problem:
model_status = n.optimize(solver_name="highs", solver_options=solver_options)
model_status = n.optimize.solve_model(
solver_name="highs", solver_options=solver_options
)

# calculate results:

def get_flh(n: Network, g: str, component_type: str) -> float:
if component_type == "Generator":
gen = n.generators_t["p"][g].mean()
p_nom = n.generators.at[g, "p_nom_opt"]
if component_type == "Link":
gen = n.links_t["p0"][g].mean()
p_nom = n.links.at[g, "p_nom_opt"]
if gen == 0:
flh = 0
else:
flh = gen / p_nom
return flh

result_data = {}

# store model status:
Expand Down
2 changes: 2 additions & 0 deletions md/info_generation_profile_figure.md
Original file line number Diff line number Diff line change
@@ -1 +1,3 @@
This figure shows output of renewable generators, electrolyzer and derivative production over time. The vertical lines delimit the eight characteristic weeks that are modeled.

Note: If Green Iron is selected as product, the unit for the derivative production process is t/h.
6 changes: 6 additions & 0 deletions md/info_optimization_results.md
Original file line number Diff line number Diff line change
Expand Up @@ -6,3 +6,9 @@ This table shows aggregated results of the optimization:
- the costs per MWh of final product.

Please note that transportation is not part of the optimization model. The costs shown here (unlike those in the **Costs** tab) are per MWh of final product produced in the supply country, and do not include costs or losses during transportation.

Please note that if Green Iron is selected as product, other units are used:

- Capacity: t/h instead of MW
- Output: t/a instead of MWh/a
- Cost: USD/t instead of USD/MWh
71 changes: 54 additions & 17 deletions ptxboa/api_calc.py
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,16 @@ def calculate(data: CalculateDataType) -> pd.DataFrame:
# start main chain calculation
main_output_value = 1 # start with normalized value of 1

# pre-calculate main_output_value before transport
# for correct scaling of storeages.
# storage units use capacity factor CAP_F
# per produced unit (before transport losses)
main_output_value_before_transport = main_output_value
for step_data in data["main_process_chain"]:
main_output_value_before_transport *= step_data["EFF"]

# accumulate needed electric input
step_before_transport = True
sum_el = main_output_value
results = []

Expand All @@ -42,26 +51,39 @@ def calculate(data: CalculateDataType) -> pd.DataFrame:
}
result_process_type = df_processes.at[process_code, "result_process_type"]

main_input_value = main_output_value

eff = step_data["EFF"]
main_output_value = main_input_value * eff

# storage efficiency must not affect main chain scaling factors:
if process_code not in ["EL-STR", "H2-STR"]:
main_input_value = main_output_value
main_output_value = main_input_value * eff

opex_o = step_data["OPEX-O"]

if not is_transport:
flh = step_data["FLH"]
liefetime = step_data["LIFETIME"]
capex = step_data["CAPEX"]
lifetime = step_data["LIFETIME"]
capex_rel = step_data["CAPEX"]
opex_f = step_data["OPEX-F"]
capacity = main_output_value / flh
capex = capacity * capex
capex_ann = annuity(wacc, liefetime, capex)

if "CAP_F" in step_data:
# Storage unit: capacity
# TODO: double check units (division by 8760 h)?
capacity = (
main_output_value_before_transport * step_data["CAP_F"] / 8760
)
else:
capacity = main_output_value / flh

capex = capacity * capex_rel
capex_ann = annuity(wacc, lifetime, capex)
opex = opex_f * capacity + opex_o * main_output_value

results.append((result_process_type, process_code, "CAPEX", capex_ann))
results.append((result_process_type, process_code, "OPEX", opex))

else:
step_before_transport = False
opex_t = step_data["OPEX-T"]
dist_transport = step_data["DIST"]
opex_ot = opex_t * dist_transport
Expand All @@ -82,14 +104,14 @@ def calculate(data: CalculateDataType) -> pd.DataFrame:
]

# no FLH
liefetime = sec_process_data["LIFETIME"]
lifetime = sec_process_data["LIFETIME"]
capex = sec_process_data["CAPEX"]
opex_f = sec_process_data["OPEX-F"]
opex_o = sec_process_data["OPEX-O"]

capacity = flow_value # no FLH
capex = capacity * capex
capex_ann = annuity(wacc, liefetime, capex)
capex_ann = annuity(wacc, lifetime, capex)
opex = opex_f * capacity + opex_o * flow_value

results.append(
Expand All @@ -101,9 +123,13 @@ def calculate(data: CalculateDataType) -> pd.DataFrame:

for sec_flow_code, sec_conv in sec_process_data["CONV"].items():
sec_flow_value = flow_value * sec_conv
if sec_flow_code == "EL":

# electricity before transport will be handled by RES step
# after transport: market
if sec_flow_code == "EL" and step_before_transport:
sum_el += sec_flow_value
# TODO: in this case: no cost?
# do not add SPECCOST below
continue

sec_speccost = parameters["SPECCOST"][sec_flow_code]
sec_flow_cost = sec_flow_value * sec_speccost
Expand All @@ -125,12 +151,16 @@ def calculate(data: CalculateDataType) -> pd.DataFrame:
else:
# use market
speccost = parameters["SPECCOST"][flow_code]
if flow_code == "EL":

# electricity before transport will be handled by RES step
# after transport: market
if flow_code == "EL" and step_before_transport:
sum_el += flow_value
# TODO: in this case: no cost?
# do not add SPECCOST below
continue

flow_cost = flow_value * speccost

# TODO: not nice
if is_transport:
flow_cost = flow_cost * dist_transport

Expand All @@ -151,9 +181,16 @@ def calculate(data: CalculateDataType) -> pd.DataFrame:
results = results.groupby(dim_columns).sum().reset_index()

# normalization:
# scale so that we star twith 1 EL input,
# scale so that we start with 1 EL input,
# rescale so that we have 1 unit output
norm_factor = sum_el / main_output_value
norm_factor = 1 / main_output_value
results["values"] = results["values"] * norm_factor

# rescale again ONLY RES to account for additionally needed electricity
# sum_el is larger than 1.0
norm_factor_el = sum_el
idx = results["process_type"] == "Electricity generation"
assert idx.any() # must have at least one entry
results.loc[idx, "values"] = results.loc[idx, "values"] * norm_factor_el

return results
Loading

0 comments on commit 5f6b7d4

Please sign in to comment.