Add_bus_gen #59

Merged 20 commits on Feb 2, 2025
Changes from 13 commits
Commits (20):
4975b8d  Fix_add_bus_gen (Margherita-Capitani, Jan 3, 2025)
14d1634  [pre-commit.ci] auto fixes from pre-commit.com hooks (pre-commit-ci[bot], Jan 3, 2025)
e77ca0c  Update_multi_microgrid (Margherita-Capitani, Jan 9, 2025)
34160bc  Merge branch 'Multi_Microgrid' of https://github.com/Margherita-Capit… (Margherita-Capitani, Jan 9, 2025)
78e3f10  [pre-commit.ci] auto fixes from pre-commit.com hooks (pre-commit-ci[bot], Jan 9, 2025)
5b003f6  Fix_error_in_create_network (Margherita-Capitani, Jan 9, 2025)
4445b3f  Merge branch 'Multi_Microgrid' of https://github.com/Margherita-Capit… (Margherita-Capitani, Jan 9, 2025)
19f2742  [pre-commit.ci] auto fixes from pre-commit.com hooks (pre-commit-ci[bot], Jan 9, 2025)
bf64f79  Fix_Add_electricity (Margherita-Capitani, Jan 13, 2025)
f5efc55  [pre-commit.ci] auto fixes from pre-commit.com hooks (pre-commit-ci[bot], Jan 13, 2025)
ab97a11  Last_fix (Margherita-Capitani, Jan 14, 2025)
87ef704  Merge branch 'Multi_Microgrid' of https://github.com/Margherita-Capit… (Margherita-Capitani, Jan 14, 2025)
8212178  [pre-commit.ci] auto fixes from pre-commit.com hooks (pre-commit-ci[bot], Jan 14, 2025)
f80de98  Fix_after_comment (Margherita-Capitani, Jan 21, 2025)
b1c1469  New_fix (Margherita-Capitani, Jan 21, 2025)
8ed3873  [pre-commit.ci] auto fixes from pre-commit.com hooks (pre-commit-ci[bot], Jan 21, 2025)
2950308  Little_Fix (Margherita-Capitani, Jan 21, 2025)
26e790d  Merge branch 'Multi_Microgrid' of https://github.com/Margherita-Capit… (Margherita-Capitani, Jan 21, 2025)
56091e0  Last_fix (Margherita-Capitani, Jan 21, 2025)
c3a0bc4  FIx_build_demand (Margherita-Capitani, Jan 30, 2025)
Snakefile (2 changes: 1 addition and 1 deletion)
@@ -113,7 +113,6 @@ rule build_demand:
},
sample_profile=PROFILE,
building_csv="resources/buildings/buildings_type.csv",
create_network="networks/base.nc",
microgrid_shapes="resources/shapes/microgrid_shapes.geojson",
clusters_with_buildings="resources/buildings/cluster_with_buildings.geojson",
output:
@@ -147,6 +146,7 @@ rule build_shapes:
rule create_network:
input:
clusters="resources/buildings/clustered_buildings.geojson",
load="resources/demand/microgrid_load.csv",
output:
"networks/base.nc",
log:
scripts/add_electricity.py (115 changes: 52 additions and 63 deletions)
@@ -87,20 +87,13 @@ def _add_missing_carriers_from_costs(n, costs, carriers):

def load_costs(tech_costs, config, elec_config, Nyears=1):
"""
Set all asset costs and other parameters.
set all asset costs and other parameters
"""
idx = pd.IndexSlice
costs = pd.read_csv(tech_costs, index_col=list(range(3))).sort_index()

costs.loc[
costs.unit.str.contains("/kWel"), "value"
] *= 1e3 # Convert EUR/kW to EUR/MW
costs.loc[
costs.unit.str.contains("/kWh"), "value"
] *= 1e3 # Convert EUR/kWh to EUR/MWh
costs.loc[costs.unit.str.contains("USD"), "value"] *= config[
"USD2013_to_EUR2013"
] # Convert USD to EUR
# correct units to MW and EUR
costs.loc[costs.unit.str.contains("/kW"), "value"] *= 1e3
costs.loc[costs.unit.str.contains("USD"), "value"] *= config["USD2013_to_EUR2013"]

costs = (
costs.loc[idx[:, config["year"], :], "value"]
@@ -156,13 +149,17 @@ def costs_for_storage(store, link1, link2=None, max_hours=1.0):

max_hours = elec_config["max_hours"]
costs.loc["battery"] = costs_for_storage(
costs.loc["lithium"],
costs.loc[
"lithium"
], # line 119 in file 'costs.csv', which was 'battery storage', was modified into 'lithium' (same values kept)
costs.loc["battery inverter"],
max_hours=max_hours["battery"],
)

max_hours = elec_config["max_hours"]
costs.loc["battery"] = costs_for_storage(
costs.loc["lead acid"],
costs.loc[
"lead acid"
], # line 120 in file 'costs.csv', which was 'battery storage', was modified into 'lead acid' (same values kept)
costs.loc["battery inverter"],
max_hours=max_hours["battery"],
)
@@ -262,9 +259,9 @@ def attach_conventional_generators(
extendable_carriers,
conventional_config,
conventional_inputs,
number_microgrids,
):
carriers = set(conventional_carriers) | set(extendable_carriers["Generator"])

_add_missing_carriers_from_costs(n, costs, carriers)

ppl = (
@@ -273,42 +270,38 @@
.rename(index=lambda s: "C" + str(s))
)
ppl["efficiency"] = ppl.efficiency.fillna(ppl.efficiency)

buses_i = n.buses.index

n.madd(
"Generator",
ppl.index,
carrier=ppl.carrier,
bus=ppl.bus,
p_nom_min=ppl.p_nom.where(ppl.carrier.isin(conventional_carriers), 0),
p_nom=ppl.p_nom.where(ppl.carrier.isin(conventional_carriers), 0),
p_nom_extendable=ppl.carrier.isin(extendable_carriers["Generator"])
| (ppl.carrier == "diesel"),
efficiency=ppl.efficiency,
marginal_cost=ppl.marginal_cost,
capital_cost=ppl.capital_cost,
build_year=ppl.datein.fillna(0).astype(int),
lifetime=(ppl.dateout - ppl.datein).fillna(np.inf),
)
microgrid_ids = [f"microgrid_{i+1}" for i in range(len(number_microgrids))]
for microgrid in microgrid_ids:
ppl.index = ppl.index + "_" + microgrid # TODO: review this
ppl["bus"] = microgrid + "_gen_bus"
Review comment (Member):
This in theory should not be needed. In custom_powerplants, the goal is to allow users to optionally add custom powerplants, and they should specify the inputs, which are also translated into the bus where each plant is installed.
It would be great to also add generators to microgrid buses when extendable, similarly to the renewable sources, but we can keep this for another PR. In this PR, just preserving the previous behaviour is fine.
To do so, the reference values in custom powerplants may probably need to be changed to match the new bus names.

Reply (Contributor Author):
Perfect, I got it; I'll restore the previous version!
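
As a rough illustration of the idea deferred above to a future PR, the hedged sketch below shows how extendable generators could be attached to each microgrid generation bus, mirroring the renewable sources. The helper name is hypothetical, and it assumes the "<microgrid>_gen_bus" naming used elsewhere in this PR:

# Hypothetical sketch (deferred to a future PR): attach one extendable
# generator per extendable carrier to every microgrid generation bus,
# mirroring how renewable generators are handled.
def attach_extendable_conventional(n, costs, extendable_carriers, microgrid_ids):
    for carrier in extendable_carriers["Generator"]:
        n.madd(
            "Generator",
            microgrid_ids,
            suffix=f" {carrier}",
            bus=[f"{mg}_gen_bus" for mg in microgrid_ids],
            carrier=carrier,
            p_nom_extendable=True,
            efficiency=costs.at[carrier, "efficiency"],
            marginal_cost=costs.at[carrier, "marginal_cost"],
            capital_cost=costs.at[carrier, "capital_cost"],
        )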

n.madd(
"Generator",
ppl.index,
carrier=ppl.carrier,
bus=ppl.bus,
p_nom_min=ppl.p_nom.where(ppl.carrier.isin(conventional_carriers), 0),
p_nom=ppl.p_nom.where(ppl.carrier.isin(conventional_carriers), 0),
p_nom_extendable=ppl.carrier.isin(extendable_carriers["Generator"]),
efficiency=ppl.efficiency,
marginal_cost=ppl.marginal_cost,
capital_cost=ppl.capital_cost,
build_year=ppl.datein.fillna(0).astype(pd.Int64Dtype()),
lifetime=(ppl.dateout - ppl.datein).fillna(np.inf),
)

for carrier in conventional_config:
# Generators of the affected technology
idx = n.generators.query("carrier == @carrier").index

for attr in list(set(conventional_config[carrier]) & set(n.generators)):
values = conventional_config[carrier][attr]

if f"conventional_{carrier}_{attr}" in conventional_inputs:
# Values affecting generators of technology k country-specific
# First map generator buses to countries; then map countries to p_max_pu
values = pd.read_csv(values, index_col=0).iloc[:, 0]
bus_values = n.buses.country.map(values)
n.generators[attr].update(
n.generators.loc[idx].bus.map(bus_values).dropna()
)
else:
# Single value affecting all k technology generators regardless of country.
n.generators.loc[idx, attr] = values


@@ -327,24 +320,27 @@ def attach_storageunits(n, costs, number_microgrids, technologies, extendable_carriers):

# Add the storage units to the power network
for tech in technologies:
n.madd(
"StorageUnit",
microgrid_ids,
" " + tech,
bus=["bus_9"],
carrier=tech,
p_nom_extendable=True,
capital_cost=costs.at[tech, "capital_cost"],
marginal_cost=costs.at[tech, "marginal_cost"],
efficiency_store=costs.at[
lookup_store["battery"], "efficiency"
], # Lead_acid and lithium have the same value
efficiency_dispatch=costs.at[
lookup_dispatch["battery"], "efficiency"
], # Lead_acid and lithium have the same value
max_hours=max_hours["battery"], # Lead_acid and lithium have the same value
cyclic_state_of_charge=True,
)
for microgrid in microgrid_ids:
Review comment (Member):
In the future we can revise this and avoid the for loop.

Reply (Contributor Author):
Perfect, I'll keep it in mind!

n.madd(
"StorageUnit",
[microgrid],
" " + tech,
bus=[f"{microgrid}_gen_bus"],
carrier=tech,
p_nom_extendable=True,
capital_cost=costs.at[tech, "capital_cost"],
marginal_cost=costs.at[tech, "marginal_cost"],
efficiency_store=costs.at[
lookup_store["battery"], "efficiency"
], # Lead_acid and lithium have the same value
efficiency_dispatch=costs.at[
lookup_dispatch["battery"], "efficiency"
], # Lead_acid and lithium have the same value
max_hours=max_hours[
"battery"
], # Lead_acid and lithium have the same value
cyclic_state_of_charge=True,
)
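
For reference, a minimal sketch of how the loop flagged in the review above could be avoided later: n.madd broadcasts list-like arguments over the index, so a single call per technology can add one storage unit per microgrid. This is a hypothetical alternative, not part of this PR:

# Hypothetical vectorized alternative: one n.madd call per technology,
# with one storage unit per microgrid and buses aligned to the index.
for tech in technologies:
    n.madd(
        "StorageUnit",
        microgrid_ids,
        " " + tech,
        bus=[f"{mg}_gen_bus" for mg in microgrid_ids],
        carrier=tech,
        p_nom_extendable=True,
        capital_cost=costs.at[tech, "capital_cost"],
        marginal_cost=costs.at[tech, "marginal_cost"],
        efficiency_store=costs.at[lookup_store["battery"], "efficiency"],
        efficiency_dispatch=costs.at[lookup_dispatch["battery"], "efficiency"],
        max_hours=max_hours["battery"],
        cyclic_state_of_charge=True,
    )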


def attach_load(n, load_file, tech_modelling):
@@ -355,12 +351,6 @@ def attach_load(n, load_file, tech_modelling):
n.madd("Load", demand_df.columns, bus=demand_df.columns, p_set=demand_df)


def update_transmission_costs(n, costs, length_factor=1.0, simple_hvdc_costs=False):
n.lines["capital_cost"] = (
n.lines["length"] * length_factor * costs.at["MVAC overhead", "capital_cost"]
)


if __name__ == "__main__":
if "snakemake" not in globals():
from _helpers_dist import mock_snakemake
@@ -406,6 +396,7 @@ def update_transmission_costs(n, costs, length_factor=1.0, simple_hvdc_costs=False):
snakemake.config["electricity"]["extendable_carriers"],
snakemake.config.get("conventional", {}),
conventional_inputs,
snakemake.config["microgrids_list"],
)

attach_storageunits(
@@ -423,6 +414,4 @@ def update_transmission_costs(n, costs, length_factor=1.0, simple_hvdc_costs=False):
snakemake.config["tech_modelling"]["load_carriers"],
)

update_transmission_costs(n, costs, length_factor=1.0, simple_hvdc_costs=False)

n.export_to_netcdf(snakemake.output[0])
scripts/build_demand.py (38 changes: 19 additions and 19 deletions)
@@ -187,7 +186,6 @@ def estimate_microgrid_population(raster_path, shapes_path, output_file):


def calculate_load(
n,
p,
raster_path,
shapes_path,
@@ -201,7 +200,7 @@
):
"""
Calculate the microgrid demand based on a load profile provided as input,
appropriately scaled according to the population calculated for each cluster
appropriately scaled according to the population calculated for each cluster.
The output includes a time-indexed DataFrame containing the load for each bus in the microgrid
and is saved as a CSV file.

@@ -224,18 +223,19 @@
microgrids_list : dict
Dictionary with microgrid names as keys and their cluster information as values.
start_date : str
Start date for filtering the time series data
Start date for filtering the time series data.
end_date : str
End date for filtering the time series data
End date for filtering the time series data.
inclusive : str
Specifies whether the filtering is inclusive of the start or end date. Possible values: "left" or "right".

Returns
-------
pd.DataFrame
DataFrame containing the calculated load profile for all microgrids.

"""
# Estimate the population for the two microgrid
# Estimate the population for the two microgrids
pop_microgrid = estimate_microgrid_population(raster_path, shapes_path, output_file)
# Load the building classification data
building_class = pd.read_csv(input_path)
@@ -248,16 +248,17 @@
time_index = pd.date_range(start="2013-01-01", end="2013-12-31 23:00:00", freq="h")
df = df.set_index(time_index)

# Apply time filtering based on the specified start and end dates
if inclusive == "left":
end_date = (pd.to_datetime(end_date) - pd.Timedelta(days=1)).strftime(
"%Y-%m-%d"
)
# Generate the snapshots range for filtering
snapshots_range = pd.date_range(
start=start_date, end=end_date, freq="h", inclusive="both"
)

df_filtered = df.loc[start_date:end_date] # Filter the time series data
# Filter the DataFrame based on the specified time range
df_filtered = df.loc[snapshots_range]
per_unit_load = df_filtered["per_unit_load"].values

# Loop over each microgrid
for grid_name, grid_data in microgrids_list.items():
for grid_name in microgrids_list.keys():
# Filter buildings belonging to the current microgrid
total_buildings = building_class[building_class["name_microgrid"] == grid_name]
total_buildings = total_buildings["count"].sum()
@@ -286,21 +287,23 @@
load_per_cluster.rename(columns=new_column_names, inplace=True)
# Add the DataFrame for the microgrid to the dictionary
microgrid_dataframes[grid_name] = load_per_cluster

# Concatenate all microgrid DataFrames horizontally
all_load_per_cluster = pd.concat(microgrid_dataframes.values(), axis=1)
# Add time indexing based on the PyPSA network snapshots
if hasattr(n, "snapshots") and len(n.snapshots) == len(all_load_per_cluster):
all_load_per_cluster.insert(0, "timestamp", n.snapshots)

# Verify that the length of snapshots matches the length of the load data
if len(snapshots_range) == len(all_load_per_cluster):
all_load_per_cluster.insert(0, "timestamp", snapshots_range)
Review comment (Member):
Can you explain this?

Reply (Contributor Author):
It was a remnant of a fix for an error I was experiencing; without it the code should actually be lighter and still work!

else:
raise ValueError("Mismatch between the length of snapshots and load data rows.")
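
To make the alignment check above concrete, a small standalone sketch with illustrative dates (one month of data, bus name made up):

import numpy as np
import pandas as pd

start_date, end_date, inclusive = "2013-01-01", "2013-02-01", "left"

if inclusive == "left":
    # A "left"-inclusive window excludes the end date, so trim it back one
    # day before building a closed hourly range.
    end_date = (pd.to_datetime(end_date) - pd.Timedelta(days=1)).strftime("%Y-%m-%d")

snapshots_range = pd.date_range(start=start_date, end=end_date, freq="h", inclusive="both")
# 721 hourly stamps: 2013-01-01 00:00 through 2013-01-31 00:00

# A load table with exactly one row per snapshot passes the check.
load = pd.DataFrame({"microgrid_1_bus_5": np.ones(len(snapshots_range))}, index=snapshots_range)
if len(snapshots_range) == len(load):
    load.insert(0, "timestamp", snapshots_range)
else:
    raise ValueError("Mismatch between the length of snapshots and load data rows.")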

# Save the cumulative results to a CSV file
all_load_per_cluster.to_csv(output_file, index=False)
return all_load_per_cluster


def calculate_load_ramp(
input_file_buildings,
n,
p,
raster_path,
shapes_path,
@@ -454,7 +457,6 @@

configure_logging(snakemake)

n = pypsa.Network(snakemake.input.create_network)
sample_profile = snakemake.input["sample_profile"]
tier_percent = snakemake.params.tier["tier_percent"]
date_start = snakemake.params.snapshots["start"]
@@ -483,7 +485,6 @@
)
if build_demand_model == 0:
calculate_load(
n,
snakemake.config["load"]["scaling_factor"],
worldpop_path,
snakemake.input["microgrid_shapes"],
Expand All @@ -499,7 +500,6 @@ def calculate_load_ramp(
elif build_demand_model == 1:
calculate_load_ramp(
snakemake.input["clusters_with_buildings"],
n,
snakemake.config["load"]["scaling_factor"],
worldpop_path,
snakemake.input["microgrid_shapes"],
scripts/build_shapes.py (2 changes: 1 addition and 1 deletion)
@@ -84,7 +84,7 @@ def create_bus_regions(microgrids_list, output_path):
# Iterate over each column in the DataFrame
for col in range(len(microgrids_list_df.columns)):
values = microgrids_list_df.iloc[:, col]
microgrid_name = microgrids_list_df.columns[col] + "_bus_renewable"
microgrid_name = microgrids_list_df.columns[col] + "_gen_bus"
davide-f marked this conversation as resolved.

# Define the vertices of the rectangle
Top_left = (values[0], values[3])
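
The loop presumably continues by building the remaining vertices and a rectangle polygon; a hedged sketch of that construction, assuming values holds [x_min, y_min, x_max, y_max] for each microgrid (illustrative numbers):

from shapely.geometry import Polygon

# Hypothetical reconstruction of the bus-region rectangle,
# assuming values = [x_min, y_min, x_max, y_max].
x_min, y_min, x_max, y_max = 41.0, 2.0, 41.5, 2.5

top_left = (x_min, y_max)  # matches Top_left = (values[0], values[3]) above
top_right = (x_max, y_max)
bottom_right = (x_max, y_min)
bottom_left = (x_min, y_min)

region = Polygon([top_left, top_right, bottom_right, bottom_left])
print(region.bounds)  # (41.0, 2.0, 41.5, 2.5)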