diff --git a/Snakefile b/Snakefile
index d1a03e9..bbb75db 100644
--- a/Snakefile
+++ b/Snakefile
@@ -113,7 +113,6 @@ rule build_demand:
         },
         sample_profile=PROFILE,
         building_csv="resources/buildings/buildings_type.csv",
-        create_network="networks/base.nc",
         microgrid_shapes="resources/shapes/microgrid_shapes.geojson",
         clusters_with_buildings="resources/buildings/cluster_with_buildings.geojson",
     output:
@@ -147,6 +146,7 @@ rule build_shapes:
 rule create_network:
     input:
         clusters="resources/buildings/clustered_buildings.geojson",
+        load="resources/demand/microgrid_load.csv",
     output:
         "networks/base.nc",
     log:
diff --git a/config.distribution.yaml b/config.distribution.yaml
index 5278275..260c7e8 100644
--- a/config.distribution.yaml
+++ b/config.distribution.yaml
@@ -57,13 +57,12 @@ house_area_limit:
   area_limit: 255 # All buildings without a specified tag, having an area less than house_limit will be considered houses

 build_demand_type:
-  type: 0
+  type: "From_file"
   std: "on"
 # type allows to select the mode by which the microgrid demand profile is generated.
-# 0 = a predetermined hourly profile is used
-# 1 = an average hourly profile is calculated by exploiting the ramp tool
-# 2 = an average hourly profile and its standard deviation is calculated using the ramp tool,
-# and both quantities are used to calculate demand.
+# From_file = a predetermined hourly profile is used
+# Ramp = an average hourly profile is calculated with the RAMP tool; when std is "on",
+# the standard deviation is also calculated and used to compute the demand.

 # definition of the Coordinate Reference Systems
 crs:
diff --git a/resources/powerplants.csv b/resources/powerplants.csv
index 14c386e..a0ff1fc 100644
--- a/resources/powerplants.csv
+++ b/resources/powerplants.csv
@@ -1,2 +1,2 @@
 ,Name,Fueltype,Technology,Set,Country,Capacity,Efficiency,Duration,Volume_Mm3,DamHeight_m,StorageCapacity_MWh,DateIn,DateRetrofit,DateOut,lat,lon,EIC,projectID,bus
-1,New_Diesel_Generator,Diesel,,PP,SL,0.0,,0.0,0.0,0.0,0.0,1986,1986.0,2031,-21.1667,27.5167,{nan},{'GPD': {'WRI1023018'}},bus_9
+1,New_Diesel_Generator,Diesel,,PP,SL,0.0,,0.0,0.0,0.0,0.0,1986,1986.0,2031,-21.1667,27.5167,{nan},{'GPD': {'WRI1023018'}},microgrid_1_gen_bus
diff --git a/scripts/add_electricity.py b/scripts/add_electricity.py
index 46ed76b..b293471 100644
--- a/scripts/add_electricity.py
+++ b/scripts/add_electricity.py
@@ -87,20 +87,14 @@ def _add_missing_carriers_from_costs(n, costs, carriers):

 def load_costs(tech_costs, config, elec_config, Nyears=1):
     """
     Set all asset costs and other parameters.
     """
     idx = pd.IndexSlice

     costs = pd.read_csv(tech_costs, index_col=list(range(3))).sort_index()
-    costs.loc[
-        costs.unit.str.contains("/kWel"), "value"
-    ] *= 1e3  # Convert EUR/kW to EUR/MW
-    costs.loc[
-        costs.unit.str.contains("/kWh"), "value"
-    ] *= 1e3  # Convert EUR/kWh to EUR/MWh
-    costs.loc[costs.unit.str.contains("USD"), "value"] *= config[
-        "USD2013_to_EUR2013"
-    ]  # Convert USD to EUR
+    # correct units to MW and EUR
+    costs.loc[costs.unit.str.contains("/kW"), "value"] *= 1e3
+    costs.loc[costs.unit.str.contains("USD"), "value"] *= config["USD2013_to_EUR2013"]

     costs = (
         costs.loc[idx[:, config["year"], :], "value"]
@@ -156,13 +150,17 @@ def costs_for_storage(store, link1, link2=None, max_hours=1.0):

     max_hours = elec_config["max_hours"]
     costs.loc["battery"] = costs_for_storage(
-        costs.loc["lithium"],
+        costs.loc[
+            "lithium"
+        ],  # line 119 of costs.csv: the former "battery storage" entry was renamed to "lithium" (values unchanged)
         costs.loc["battery inverter"],
         max_hours=max_hours["battery"],
     )
-
+    max_hours = elec_config["max_hours"]
     costs.loc["battery"] = costs_for_storage(
-        costs.loc["lead acid"],
+        costs.loc[
+            "lead acid"
+        ],  # line 120 of costs.csv: the former "battery storage" entry was renamed to "lead acid" (values unchanged)
         costs.loc["battery inverter"],
         max_hours=max_hours["battery"],
     )
@@ -327,24 +325,27 @@ def attach_storageunits(n, costs, number_microgrids, technologies, extendable_ca

     # Add the storage units to the power network
     for tech in technologies:
-        n.madd(
-            "StorageUnit",
-            microgrid_ids,
-            " " + tech,
-            bus=["bus_9"],
-            carrier=tech,
-            p_nom_extendable=True,
-            capital_cost=costs.at[tech, "capital_cost"],
-            marginal_cost=costs.at[tech, "marginal_cost"],
-            efficiency_store=costs.at[
-                lookup_store["battery"], "efficiency"
-            ],  # Lead_acid and lithium have the same value
-            efficiency_dispatch=costs.at[
-                lookup_dispatch["battery"], "efficiency"
-            ],  # Lead_acid and lithium have the same value
-            max_hours=max_hours["battery"],  # Lead_acid and lithium have the same value
-            cyclic_state_of_charge=True,
-        )
+        for microgrid in microgrid_ids:
+            n.madd(
+                "StorageUnit",
+                [microgrid],
+                " " + tech,
+                bus=[f"{microgrid}_gen_bus"],
+                carrier=tech,
+                p_nom_extendable=True,
+                capital_cost=costs.at[tech, "capital_cost"],
+                marginal_cost=costs.at[tech, "marginal_cost"],
+                efficiency_store=costs.at[
+                    lookup_store["battery"], "efficiency"
+                ],  # Lead_acid and lithium have the same value
+                efficiency_dispatch=costs.at[
+                    lookup_dispatch["battery"], "efficiency"
+                ],  # Lead_acid and lithium have the same value
+                max_hours=max_hours[
+                    "battery"
+                ],  # Lead_acid and lithium have the same value
+                cyclic_state_of_charge=True,
+            )


 def attach_load(n, load_file, tech_modelling):
@@ -355,12 +356,6 @@
     n.madd("Load", demand_df.columns, bus=demand_df.columns, p_set=demand_df)


-def update_transmission_costs(n, costs, length_factor=1.0, simple_hvdc_costs=False):
-    n.lines["capital_cost"] = (
-        n.lines["length"] * length_factor * costs.at["MVAC overhead", "capital_cost"]
-    )
-
-
 if __name__ == "__main__":
     if "snakemake" not in globals():
         from _helpers_dist import mock_snakemake
@@ -423,6 +418,4 @@
         snakemake.config["tech_modelling"]["load_carriers"],
     )

-    update_transmission_costs(n, costs, length_factor=1.0, simple_hvdc_costs=False)
-
     n.export_to_netcdf(snakemake.output[0])
diff --git a/scripts/build_demand.py b/scripts/build_demand.py
index 37f1f27..c3f7fc1 100644
--- a/scripts/build_demand.py
+++ b/scripts/build_demand.py
@@ -187,7 +187,6 @@ def estimate_microgrid_population(raster_path, shapes_path, output_file):

 def calculate_load(
-    n,
     p,
     raster_path,
     shapes_path,
@@ -201,7 +200,7 @@ def calculate_load(
 ):
     """
     Calculate the microgrid demand based on a load profile provided as input,
-    appropriately scaled according to the population calculated for each cluster
+    appropriately scaled according to the population calculated for each cluster.
     The output includes a time-indexed DataFrame containing the load
     for each bus in the microgrid and is saved as a CSV file.

@@ -224,18 +223,19 @@ def calculate_load(
     microgrids_list : dict
         Dictionary with microgrid names as keys and their cluster information as values.
     start_date : str
-        Start date for filtering the time series data
+        Start date for filtering the time series data.
     end_date : str
-        End date for filtering the time series data
+        End date for filtering the time series data.
     inclusive : str
         Specifies whether the filtering is inclusive of the start or end date.
         Possible values: "left" or "right".
+
     Returns
     -------
     pd.DataFrame
         DataFrame containing the calculated load profile for all microgrids.
     """
-    # Estimate the population for the two microgrid
+    # Estimate the population for each microgrid
     pop_microgrid = estimate_microgrid_population(raster_path, shapes_path, output_file)
     # Load the building classification data
     building_class = pd.read_csv(input_path)
@@ -248,16 +248,17 @@ def calculate_load(
     time_index = pd.date_range(start="2013-01-01", end="2013-12-31 23:00:00", freq="h")
     df = df.set_index(time_index)

-    # Apply time filtering based on the specified start and end dates
-    if inclusive == "left":
-        end_date = (pd.to_datetime(end_date) - pd.Timedelta(days=1)).strftime(
-            "%Y-%m-%d"
-        )
+    # Generate the snapshots range for filtering
+    snapshots_range = pd.date_range(
+        start=start_date, end=end_date, freq="h", inclusive="both"
+    )

-    df_filtered = df.loc[start_date:end_date]  # Filter the time series data
+    # Filter the DataFrame based on the specified time range
+    df_filtered = df.loc[snapshots_range]
     per_unit_load = df_filtered["per_unit_load"].values
+
     # Loop over each microgrid
-    for grid_name, grid_data in microgrids_list.items():
+    for grid_name in microgrids_list.keys():
         # Filter buildings belonging to the current microgrid
         total_buildings = building_class[building_class["name_microgrid"] == grid_name]
         total_buildings = total_buildings["count"].sum()
@@ -286,13 +287,11 @@ def calculate_load(
         load_per_cluster.rename(columns=new_column_names, inplace=True)
         # Add the DataFrame for the microgrid to the dictionary
         microgrid_dataframes[grid_name] = load_per_cluster
+
     # Concatenate all microgrid DataFrames horizontally
     all_load_per_cluster = pd.concat(microgrid_dataframes.values(), axis=1)
-    # Add time indexing based on the PyPSA network snapshots
-    if hasattr(n, "snapshots") and len(n.snapshots) == len(all_load_per_cluster):
-        all_load_per_cluster.insert(0, "timestamp", n.snapshots)
-    else:
-        raise ValueError("Mismatch between the length of snapshots and load data rows.")
+    all_load_per_cluster.index = snapshots_range
+
     # Save the cumulative results to a CSV file
     all_load_per_cluster.to_csv(output_file, index=False)
     return all_load_per_cluster
@@ -300,7 +299,6 @@ def calculate_load(

 def calculate_load_ramp(
     input_file_buildings,
-    n,
     p,
     raster_path,
     shapes_path,
@@ -454,7 +452,6 @@ def calculate_load_ramp(

     configure_logging(snakemake)

-    n = pypsa.Network(snakemake.input.create_network)
     sample_profile = snakemake.input["sample_profile"]
     tier_percent = snakemake.params.tier["tier_percent"]
     date_start = snakemake.params.snapshots["start"]
@@ -481,9 +478,8 @@ def calculate_load_ramp(
         snakemake.input["microgrid_shapes"],
         snakemake.output["electric_load"],
     )
-    if build_demand_model == 0:
+    if build_demand_model == "From_file":
         calculate_load(
-            n,
             snakemake.config["load"]["scaling_factor"],
             worldpop_path,
             snakemake.input["microgrid_shapes"],
@@ -496,10 +492,9 @@ def calculate_load_ramp(
             inclusive,
         )

-    elif build_demand_model == 1:
+    elif build_demand_model == "Ramp":
         calculate_load_ramp(
             snakemake.input["clusters_with_buildings"],
-            n,
             snakemake.config["load"]["scaling_factor"],
             worldpop_path,
             snakemake.input["microgrid_shapes"],
diff --git a/scripts/build_shapes.py b/scripts/build_shapes.py
index 481ed40..0c8f492 100644
--- a/scripts/build_shapes.py
+++ b/scripts/build_shapes.py
@@ -84,7 +84,7 @@ def create_bus_regions(microgrids_list, output_path):
     # Iterate over each column in the DataFrame
     for col in range(len(microgrids_list_df.columns)):
         values = microgrids_list_df.iloc[:, col]
-        microgrid_name = microgrids_list_df.columns[col] + "_bus_renewable"
+        microgrid_name = microgrids_list_df.columns[col] + "_gen_bus"

         # Define the vertices of the rectangle
         Top_left = (values[0], values[3])
diff --git a/scripts/create_network.py b/scripts/create_network.py
index f59596f..4a1db24 100644
--- a/scripts/create_network.py
+++ b/scripts/create_network.py
@@ -38,12 +38,25 @@ def create_network():
     return n


-def create_microgrid_network(n, input_file, voltage_level, line_type, microgrid_list):
+def calculate_power_node_position(load_file, cluster_bus):
+    load_sums = load_file.sum(numeric_only=True)
+    load_sums.index = cluster_bus["cluster"]
+    gdf = cluster_bus.set_index("cluster")
+    gdf["cluster_load"] = load_sums.values
+    x_wgt_avg = (gdf.geometry.x * load_sums).sum() / load_sums.sum()
+    y_wgt_avg = (gdf.geometry.y * load_sums).sum() / load_sums.sum()
+
+    return x_wgt_avg, y_wgt_avg
+
+
+def create_microgrid_network(
+    n, input_file, voltage_level, line_type, microgrid_list, input_path
+):
     """
     Creates local microgrid networks within the PyPSA network.
     The local microgrid networks are distribution networks created based on the buildings data,
     stored in "resources/buildings/microgrids_buildings.geojson".
     Each bus corresponds to a cluster of buildings within a microgrid, with its coordinates defined in the input GeoJSON file.
-    The lines connecting buses are determined using Delaunay triangulation,ensuring minimal total line length.
+    The lines connecting buses are determined using Delaunay triangulation, ensuring minimal total line length.
     The function avoids duplicate buses and ensures buses are assigned to the correct SubNetwork.

     Parameters
     ----------
@@ -66,20 +79,34 @@ def create_microgrid_network(n, input_file, voltage_level, line_type, microgrid_
     """
     data = gpd.read_file(input_file)
+    load = pd.read_csv(input_path)
     bus_coords = set()  # Keep track of bus coordinates to avoid duplicates

     for grid_name, grid_data in microgrid_list.items():
+        # List to store bus names and their positions for triangulation
+        microgrid_buses = []
+        bus_positions = []
+        # Filter data for the current microgrid
         grid_data = data[data["name_microgrid"] == grid_name]
+        load_data = load[[col for col in load.columns if grid_name in col]]
+        x_gen_bus, y_gen_bus = calculate_power_node_position(load_data, grid_data)
+        gen_bus_name = f"{grid_name}_gen_bus"
+        n.add(
+            "Bus",
+            gen_bus_name,
+            x=x_gen_bus,
+            y=y_gen_bus,
+            v_nom=voltage_level,
+            sub_network=grid_name,
+        )
+        microgrid_buses.append(gen_bus_name)
+        bus_positions.append((x_gen_bus, y_gen_bus))

         # Create a SubNetwork for the current microgrid if it does not exist
         if grid_name not in n.sub_networks.index:
             n.add("SubNetwork", grid_name, carrier="electricity")

-        # List to store bus names and their positions for triangulation
-        microgrid_buses = []
-        bus_positions = []
-
         for _, feature in grid_data.iterrows():
             point_geom = feature.geometry
             bus_name = f"{grid_name}_bus_{feature['cluster']}"
@@ -247,13 +274,7 @@ def create_microgrid_network(n, input_file, voltage_level, line_type, microgrid_
         snakemake.config["electricity"]["voltage"],
         snakemake.config["electricity"]["line_type"],
         microgrids_list,
+        snakemake.input["load"],
     )
-
-    # add_bus_at_center(n,
-    #                   snakemake.config["microgrids_list"],
-    #                   snakemake.config["electricity"]["voltage"],
-    #                   snakemake.config["electricity"]["line_type"])
-
-    # plot_microgrid_network(n)
     a = 12
     n.export_to_netcdf(snakemake.output[0])
diff --git a/test/config.distribution.test.yaml b/test/config.distribution.test.yaml
index 4c46967..7dbbb81 100644
--- a/test/config.distribution.test.yaml
+++ b/test/config.distribution.test.yaml
@@ -55,8 +55,12 @@ house_area_limit:
   area_limit: 255

 build_demand_type:
-  type: 0
+  type: "From_file"
   std: "on"
+# type allows to select the mode by which the microgrid demand profile is generated.
+# From_file = a predetermined hourly profile is used
+# Ramp = an average hourly profile is calculated with the RAMP tool; when std is "on",
+# the standard deviation is also calculated and used to compute the demand.

 # definition of the Coordinate Reference Systems
 crs:
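Note on the new placement logic: the snippet below is a minimal, self-contained sketch of what calculate_power_node_position() in scripts/create_network.py computes, namely a load-weighted centroid at which each microgrid's generation bus is placed. All values in it (cluster ids, coordinates, load sums) are invented for illustration and do not come from the repository.

import geopandas as gpd
import pandas as pd
from shapely.geometry import Point

# Hypothetical cluster buses of one microgrid, standing in for the
# GeoDataFrame passed as cluster_bus.
cluster_bus = gpd.GeoDataFrame(
    {"cluster": [0, 1, 2]},
    geometry=[Point(27.50, -21.10), Point(27.55, -21.20), Point(27.60, -21.15)],
)
# Per-cluster load totals, standing in for load_file.sum(numeric_only=True).
load_sums = pd.Series([10.0, 30.0, 60.0])

# Load-weighted average of the bus coordinates, as in the patch.
x_gen = (cluster_bus.geometry.x * load_sums).sum() / load_sums.sum()
y_gen = (cluster_bus.geometry.y * load_sums).sum() / load_sums.sum()
print(x_gen, y_gen)  # 27.575 -21.16: cluster 2 carries most load and pulls the bus toward it

Placing the generation bus (and, via resources/powerplants.csv and attach_storageunits, the diesel generator and storage units) at this weighted centroid ties the supply side to each microgrid's demand distribution, instead of relying on the hardcoded "bus_9" naming the previous version used.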