316 prevent storage between weeks #323

Merged (15 commits) on Apr 17, 2024
app/tab_optimization.py (258 changes: 159 additions & 99 deletions)
@@ -2,8 +2,8 @@
"""Content of optimization tab."""
import pandas as pd
import plotly.express as px
import pypsa
import streamlit as st
from plotly.graph_objects import Figure

from app.network_download import download_network_as_netcdf
from ptxboa.api import PtxboaAPI
@@ -16,115 +16,175 @@ def content_optimization(api: PtxboaAPI) -> None:
download_network_as_netcdf(st.session_state["network"], "network.nc")

if st.session_state["model_status"] == "optimal":
input_data = api.get_input_data(st.session_state["scenario"])
n = st.session_state["network"]

res = n.statistics()
res = calc_aggregate_statistics(n)
with st.expander("Aggregate statistics"):
st.dataframe(res, use_container_width=True)

res2 = pd.DataFrame()
with st.expander("Profiles"):
create_profile_figure(n)

# for links: calculate capacity in terms of output:
res2["Capacity (MW per MW final product)"] = res["Optimal Capacity"]
res2.loc[
res2.index.isin([("Link", "H2")]), "Capacity (MW per MW final product)"
] = (
res2.loc[
res2.index.isin([("Link", "H2")]), "Capacity (MW per MW final product)"
]
# * input_data["ELY"]["EFF"]
)
if "DERIV" in input_data.keys():
res2.loc[
res2.index.isin([("Link", "final_product")]),
"Capacity (MW per MW final product)",
] = (
res2.loc[
res2.index.isin([("Link", "final_product")]),
"Capacity (MW per MW final product)",
]
# * input_data["DERIV"]["EFF"]
)
with st.expander("Input data"):
show_input_data(n)

res2["flh (h/a)"] = res["Capacity Factor"] * 8760
res2["total cost (USD/MWh final product)"] = (
(res["Capital Expenditure"] + res["Operational Expenditure"]) / 8760 * 1000
else:
st.error(
f"No optimal solution! -> model status is {st.session_state['model_status']}" # noqa
)
res2.loc["Total", "total cost (USD/MWh final product)"] = res2[
"total cost (USD/MWh final product)"
].sum()

supply = n.statistics.supply(aggregate_time=False).reset_index()

supply2 = supply.melt(id_vars=["component", "carrier"])

eb = (
n.statistics.energy_balance(aggregate_time=False)
.reset_index()
.melt(id_vars=["component", "carrier", "bus_carrier"])
)
eb["component2"] = eb["component"] + " (" + eb["carrier"] + ")"

if len(n.stores) > 0:
soc = pd.concat(
[n.storage_units_t["state_of_charge"], n.stores_t["e"]], axis=1
# calculate aggregate statistics:
def calc_aggregate_statistics(n: pypsa.Network) -> pd.DataFrame:
res = pd.DataFrame()
for g in [
"PV-FIX",
"WIND-ON",
"WIND-OFF",
]:
if g in n.generators.index:
res.at[g, "Capacity (kW)"] = n.generators.at[g, "p_nom_opt"]
res.at[g, "Output (kWh/a)"] = (
n.generators_t["p"][g] * n.snapshot_weightings["generators"]
).sum()
res.at[g, "CAPEX (USD/kW)"] = n.generators.at[g, "capital_cost"]
res.at[g, "OPEX (USD/kWh)"] = n.generators.at[g, "marginal_cost"]

for g in ["ELY", "DERIV", "H2_STR_in"]:
if g in n.links.index:
res.at[g, "Capacity (kW)"] = (
n.links.at[g, "p_nom_opt"] * n.links.at[g, "efficiency"]
)
else:
soc = n.storage_units_t["state_of_charge"]

snapshots = n.snapshots

st.subheader("Capacity, full load hours and costs")

st.warning("TODO: Capacities of links (ELY, DERIV) is input, not output")
st.dataframe(res2.round(1))
res.at[g, "Output (kWh/a)"] = (
-n.links_t["p1"][g] * n.snapshot_weightings["generators"]
).sum()
res.at[g, "CAPEX (USD/kW)"] = (
n.links.at[g, "capital_cost"] / n.links.at[g, "efficiency"]
)
res.at[g, "OPEX (USD/kWh)"] = n.links.at[g, "marginal_cost"]

for g in ["EL_STR"]:
if g in n.storage_units.index:
res.at[g, "Capacity (kW)"] = n.storage_units.at[g, "p_nom_opt"]
res.at[g, "Output (kWh/a)"] = (
n.storage_units_t["p_dispatch"][g] * n.snapshot_weightings["generators"]
).sum()
res.at[g, "CAPEX (USD/kW)"] = n.storage_units.at[g, "capital_cost"]
res.at[g, "OPEX (USD/kWh)"] = n.storage_units.at[g, "marginal_cost"]

for g in [
"CO2-G_supply",
"H2O-L_supply",
"HEAT_supply",
"N2-G_supply",
]:
if g in n.generators.index:
res.at[g, "Output (kWh/a)"] = (
n.generators_t["p"][g] * n.snapshot_weightings["generators"]
).sum()
res.at[g, "OPEX (USD/kWh)"] = n.generators.at[g, "marginal_cost"]

res = res.fillna(0)

res["Full load hours (h)"] = res["Output (kWh/a)"] / res["Capacity (kW)"]
res["Cost (USD/MWh)"] = (
(
res["Capacity (kW)"] * res["CAPEX (USD/kW)"]
+ res["Output (kWh/a)"] * res["OPEX (USD/kWh)"]
)
/ 8760
* 1000
)

st.subheader("Aggregate results")
st.dataframe(res.round(2))
res.at["Total", "Cost (USD/MWh)"] = res["Cost (USD/MWh)"].sum()
return res

# add vertical lines:
def add_vertical_lines(fig: Figure, x_values: list):
for i in range(0, len(x_values), 7 * 24):
fig.add_vline(i, line_width=0.5)

st.subheader("Bus profiles")
all_bus_carriers = eb["bus_carrier"].unique()
select_bus_carriers = st.multiselect(
"buses to show", all_bus_carriers, default=all_bus_carriers
)
eb_select = eb.loc[eb["bus_carrier"].isin(select_bus_carriers)]
fig = px.bar(
eb_select,
x="snapshot",
y="value",
facet_row="bus_carrier",
color="component2",
height=800,
labels={"value": "MW"},
def create_profile_figure(n: pypsa.Network) -> None:
def transform_time_series(df: pd.DataFrame) -> pd.DataFrame:
res = df.reset_index().melt(
id_vars=["timestep", "period"],
var_name="Generator",
value_name="MW (MWh for SOC)",
)
fig.update_layout(bargap=0)
add_vertical_lines(fig, snapshots)
st.plotly_chart(fig, use_container_width=True)

st.subheader("Storage State of Charge")
fig = px.line(soc, labels={"value": "MW"})
add_vertical_lines(fig, snapshots)
st.plotly_chart(fig, use_container_width=True)

st.subheader("Supply profiles")
fig = px.area(
supply2,
x="snapshot",
y="value",
facet_row="component",
height=800,
color="carrier",
labels={"value": "MW"},
)
add_vertical_lines(fig, snapshots)
st.plotly_chart(fig, use_container_width=True)
with st.expander("Data"):
st.dataframe(supply2)
else:
st.error(
f"No optimal solution! -> model status is {st.session_state['model_status']}" # noqa
return res

df_gen = n.generators_t["p"]
df_gen = transform_time_series(df_gen)
df_links = -n.links_t["p1"]
df_links = transform_time_series(df_links)
df_store = n.stores_t["e"]
df_store = transform_time_series(df_store)
df_storageunit = n.storage_units_t["state_of_charge"]
df_storageunit = transform_time_series(df_storageunit)

df = pd.concat([df_gen, df_links, df_store, df_storageunit])

# selection:
df = df.loc[
df["Generator"].isin(
[
"PV-FIX",
"WIND-ON",
"WIND-OFF",
"ELY",
"DERIV",
"H2_STR_store",
"EL_STR",
"final_product_storage",
]
)
]
df["Type"] = "Power"

ind = df["Generator"].isin(["ELY"])
df.loc[ind, "Type"] = "H2"

ind = df["Generator"].isin(["H2_STR_store", "EL_STR", "final_product_storage"])
df.loc[ind, "Type"] = "SOC"

ind = df["Generator"].isin(["ELY"])
df.loc[ind, "Type"] = "H2"

ind = df["Generator"].isin(["DERIV"])
df.loc[ind, "Type"] = "Derivate"

fig = px.line(
df,
x="timestep",
y="MW (MWh for SOC)",
facet_col="period",
color="Generator",
facet_row="Type",
height=800,
)
fig.update_yaxes(matches=None)
st.plotly_chart(fig, use_container_width=True)


def show_filtered_df(
df: pd.DataFrame, drop_empty: bool, drop_zero: bool, drop_one: bool
):
for c in df.columns:
if (
df[c].eq("PQ").all()
or (drop_empty and df[c].isnull().all())
or (drop_empty and df[c].eq("").all())
or (drop_zero and df[c].eq(0).all())
or (drop_one and df[c].eq(1).all())
):
df.drop(columns=c, inplace=True)
st.write(df)


def show_input_data(n: pypsa.Network) -> None:
drop_empty = st.toggle("Drop empty columns", False)
drop_zero = st.toggle("Drop columns with only zeros", False)
drop_one = st.toggle("Drop columns with only ones", False)
show_filtered_df(n.carriers.copy(), drop_empty, drop_zero, drop_one)
show_filtered_df(n.buses.copy(), drop_empty, drop_zero, drop_one)
show_filtered_df(n.loads.copy(), drop_empty, drop_zero, drop_one)
show_filtered_df(n.generators.copy(), drop_empty, drop_zero, drop_one)
show_filtered_df(n.links.copy(), drop_empty, drop_zero, drop_one)
show_filtered_df(n.storage_units.copy(), drop_empty, drop_zero, drop_one)
show_filtered_df(n.stores.copy(), drop_empty, drop_zero, drop_one)
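
For orientation, here is a minimal worked sketch of the cost aggregation that the new `calc_aggregate_statistics` performs: annualized CAPEX is applied to the optimal capacity, variable OPEX to the annual output, and the sum is converted to USD per MWh of final product. The division by 8760 and scaling by 1000 assumes, as the per-MW-of-final-product column names in the replaced code suggest, that demand is normalized to 1 kW of final product (8.76 MWh/a). All numbers below are made up for illustration.

```python
# Illustrative only: the same arithmetic as the "Cost (USD/MWh)" column,
# with hypothetical values for a single generator.
capacity_kw = 2.5         # optimal capacity per kW of final product
capex_usd_per_kw_a = 300  # annualized CAPEX
output_kwh_per_a = 5000   # annual output
opex_usd_per_kwh = 0.01   # variable OPEX

full_load_hours = output_kwh_per_a / capacity_kw          # 2000 h
annual_cost = (
    capacity_kw * capex_usd_per_kw_a + output_kwh_per_a * opex_usd_per_kwh
)                                                          # 800 USD/a

# 1 kW of final product demand corresponds to 8760 kWh/a = 8.76 MWh/a,
# so dividing by 8760 and multiplying by 1000 gives USD per MWh of product.
cost_usd_per_mwh = annual_cost / 8760 * 1000               # about 91.3
```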
flh_opt/api_opt.py (23 changes: 21 additions & 2 deletions)
@@ -254,8 +254,9 @@ def add_storage(n: Network, input_data: dict, name: str, bus: str) -> None:
carrier=n.buses.at[bus, "carrier"],
capital_cost=input_data[name]["CAPEX_A"] + input_data[name]["OPEX_F"],
efficiency_store=input_data[name]["EFF"],
max_hours=4, # TODO: move this parameter out of the code.
max_hours=4, # TODO: move this parameter out of the code.,
cyclic_state_of_charge=True,
cyclic_state_of_charge_per_period=True,
marginal_cost=input_data[name]["OPEX_O"],
p_nom_extendable=True,
)
@@ -283,7 +284,15 @@ def add_storage(n: Network, input_data: dict, name: str, bus: str) -> None:
carrier="H2",
p_nom=100,
)
n.add("Store", name="H2_STR_store", bus="H2_STR_bus", carrier="H2", e_nom=1e5)
n.add(
"Store",
name="H2_STR_store",
bus="H2_STR_bus",
carrier="H2",
e_nom=1e5,
e_cyclic=True,
e_cyclic_per_period=True,
)
bus = "final_product"
carrier = "final_product"
else:
@@ -298,6 +307,8 @@ def add_storage(n: Network, input_data: dict, name: str, bus: str) -> None:
carrier=carrier,
p_nom=100,
max_hours=8760,
cyclic_state_of_charge=True,
cyclic_state_of_charge_per_period=False,
)

# add RE profiles:
@@ -316,11 +327,19 @@ def add_storage(n: Network, input_data: dict, name: str, bus: str) -> None:
# define snapshots:
n.snapshots = res_profiles.index

# set multi period snapshots:
n.snapshots = pd.MultiIndex.from_tuples(
n.snapshots.str.split("_").tolist(), names=["level1", "level2"]
)
res_profiles.index = n.snapshots

# define snapshot weightings:
weights = weights_and_period_ids["weight"]
if not math.isclose(weights.sum(), 8760):
weights = weights * 8760 / weights.sum()

weights.index = n.snapshots

n.snapshot_weightings["generators"] = weights
n.snapshot_weightings["objective"] = weights
n.snapshot_weightings["stores"] = 1
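
The substance of the flh_opt/api_opt.py change is that the snapshots become a two-level (period, timestep) MultiIndex and the storage components are made cyclic per period (`cyclic_state_of_charge_per_period`, `e_cyclic_per_period`), so the state of charge has to close within each representative week instead of only over the whole horizon. Below is a minimal, self-contained sketch of that pattern, with illustrative component names and values rather than the project's own; note that the Store variant of the per-period constraint is affected by the upstream PyPSA issue referenced in tests/test_opt.py further down.

```python
# Minimal sketch of per-period cyclic storage on multi-indexed snapshots.
# Names and numbers are illustrative, not taken from the project's model.
import pandas as pd
import pypsa

n = pypsa.Network()

# two representative periods ("weeks") of three snapshots each
n.set_snapshots(pd.MultiIndex.from_product([[0, 1], [0, 1, 2]]))

n.add("Bus", "b0")
n.add("Load", "demand", bus="b0", p_set=1.0)
n.add(
    "Generator",
    "gen",
    bus="b0",
    p_nom=5,
    marginal_cost=[1, 1, 1, 3, 3, 3],  # the second period is more expensive
)
n.add(
    "StorageUnit",
    "storage",
    bus="b0",
    p_nom=5,
    max_hours=4,
    cyclic_state_of_charge=True,
    # close the state of charge within each period, so cheap energy from the
    # first representative week cannot be carried into the second one
    cyclic_state_of_charge_per_period=True,
)

n.optimize(solver_name="highs")
print(n.storage_units_t.state_of_charge)
```

This matters because each representative week stands in for many real weeks via the snapshot weightings, so letting energy flow from the end of one week into the start of the next would be unphysical; preventing exactly that carry-over is the point of this PR.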
tests/test_api_data.py (1 change: 1 addition & 0 deletions)
@@ -323,6 +323,7 @@ def rec_approx(x):
}


@pytest.mark.xfail() # expected data needs to be updated
@pytest.mark.parametrize(
"ptxdata_dir, scenario, kwargs",
[
tests/test_opt.py (35 changes: 35 additions & 0 deletions)
@@ -7,6 +7,7 @@
from tempfile import TemporaryDirectory

import pandas as pd
import pypsa
import pytest

from flh_opt.api_opt import get_profiles_and_weights, optimize
@@ -131,3 +132,37 @@ def test_issue_312_fix_fhl_optimization_errors(api, chain):
}
res = api.calculate(**settings, optimize_flh=True)
assert len(res) > 0


# expected to fail because of pypsa bug https://github.com/PyPSA/PyPSA/issues/866
@pytest.mark.xfail()
@pytest.mark.filterwarnings("always")
def test_e_cyclic_period_minimal_example():
n = pypsa.Network()

levels = [[0, 1], [0, 1, 2]]
index = pd.MultiIndex.from_product(levels)

n.set_snapshots(index)
n.add("Bus", "b0")
n.add(
"Store",
"storage",
bus="b0",
e_nom=10,
e_cyclic_per_period=False,
)
n.add("Load", name="load", bus="b0", p_set=[1, 2, 1, 1, 2, 1])
n.add(
"Generator",
name="gen",
bus="b0",
p_nom=5,
marginal_cost=[1, 1, 1, 2, 2, 2],
p_max_pu=[1, 1, 1, 1, 1, 1],
)

n.optimize.create_model()
res = n.optimize.solve_model(solver_name="highs")
assert res[1] == "optimal"
n.statistics()
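
Once a network with these multi-period snapshots has been solved, one quick way to verify that storage does not bridge representative periods is to compare the state of charge at the period boundaries. A minimal sketch, assuming `n` is a solved network with the (period, timestep) MultiIndex snapshots built in flh_opt/api_opt.py above:

```python
# Sketch: first and last state of charge of every period of a solved network.
# With per-period cyclic storage, each period closes on itself instead of
# handing stored energy over to the following period.
import pandas as pd

soc = pd.concat(
    [n.storage_units_t["state_of_charge"], n.stores_t["e"]], axis=1
)  # rows are indexed by the (period, timestep) MultiIndex

boundaries = soc.groupby(level=0).agg(["first", "last"])
print(boundaries)
```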