From 636229a49bfd40883719867e3bd5550522515d74 Mon Sep 17 00:00:00 2001 From: "pierre-francois.duc" Date: Fri, 31 May 2024 16:19:20 +0200 Subject: [PATCH 01/20] Add functions to create a single Dataframe containing oemof results Those are in a separate module as they could very well be implemented into oemof.solph --- .../datapackage/post_processing.py | 186 ++++++++++++++++++ 1 file changed, 186 insertions(+) create mode 100644 src/oemof_tabular_plugins/datapackage/post_processing.py diff --git a/src/oemof_tabular_plugins/datapackage/post_processing.py b/src/oemof_tabular_plugins/datapackage/post_processing.py new file mode 100644 index 00000000..a8c9bfc1 --- /dev/null +++ b/src/oemof_tabular_plugins/datapackage/post_processing.py @@ -0,0 +1,186 @@ +import pandas as pd +import oemof.solph as solph + + +def infer_busses_carrier(energy_system): + """Loop through the nodes of an energy system and infer the carrier of busses from them + + Parameters + ---------- + energy_system: oemof.solph.EnergySystem instance + + Returns + ------- + dict mapping the busses labels to their carrier + + """ + + busses_carrier = {} + + for node in energy_system.nodes: + if hasattr(node, "carrier"): + for attribute in ("bus", "from_bus"): + if hasattr(node, attribute): + + bus_label = getattr(node, attribute).label + if bus_label in busses_carrier: + if busses_carrier[bus_label] != node.carrier: + raise ValueError( + f"Two different carriers ({busses_carrier[bus_label]}, {node.carrier}) are associated to the same bus '{bus_label}'" + ) + else: + busses_carrier[bus_label] = node.carrier + + busses = [node.label for node in energy_system.nodes if isinstance(node, solph.Bus)] + + for bus_label in busses: + if bus_label not in busses_carrier: + raise ValueError( + f"Bus '{bus_label}' is missing from the busses carrier dict inferred from the EnergySystem instance" + ) + + return busses_carrier + + +def infer_asset_types(energy_system): + """Loop through the nodes of an energy system and infer their types + + Parameters + ---------- + energy_system: oemof.solph.EnergySystem instance + + Returns + ------- + a dict mapping the asset (nodes which are not busses) labels to their type + + """ + asset_types = {} + for node in energy_system.nodes: + if isinstance(node, solph.Bus) is False: + asset_types[node.label] = node.type + return asset_types + + +def construct_multi_index_levels(flow_tuple, busses_info, assets_info=None): + """Infer the index levels of the multi index dataframe sequence tuple and extra optional information + + Parameters + ---------- + flow_tuple: tuple of bus label and asset label + (A,B) means flow from A to B + busses_info: either a list or a dict + if not a list of busses labels, then a dict where busses labels are keys mapped to the bus carrier + assets_info: dict + mapping of asset labels to their type + + Returns + ------- + a tuple with (bus label, direction of flow relative to asset, asset label, bus carrier (optional), asset type (optional)) direction is either 'in' or 'out'. 
+ + The minimal tuple (b_elec, "in", demand) would read the flow goes from bus 'b_elec' '"in"' asset 'demand' + + """ + if isinstance(busses_info, dict): + busses_labels = [bn for bn in busses_info] + elif isinstance(busses_info, list): + busses_labels = busses_info + + # infer which of the 2 nodes composing the flow is the bus + bus_label = set(busses_labels).intersection(set(flow_tuple)) + if len(bus_label) == 1: + bus_label = bus_label.pop() + else: + raise ValueError("Flow tuple does not contain only one bus node") + # get position of bus node in the flow tuple + idx_bus = flow_tuple.index(bus_label) + answer = None + + # determine whether the flow goes from bus to asset or reverse + if idx_bus == 0: + # going from bus to asset, so the flow goes in to the asset + asset_label = flow_tuple[1] + answer = (bus_label, "in", asset_label) + + elif idx_bus == 1: + asset_label = flow_tuple[0] + # going from asset to bus, so the flow goes out of the asset + answer = (bus_label, "out", asset_label) + + # add information of the bus carrier, if provided + if isinstance(busses_info, dict): + answer = answer + (busses_info[bus_label],) + # add information of the asset type, if provided + if assets_info is not None: + answer = answer + (assets_info[asset_label],) + return answer + + +def construct_dataframe_from_results(energy_system, bus_carrier=True, asset_type=True): + """ + + Parameters + ---------- + energy_system: oemof.solph.EnergySystem instance + bus_carrier: bool (opt) + If set to true, the multi-index of the DataFrame will have a level about bus carrier + asset_type: bool (opt) + If set to true, the multi-index of the DataFrame will have a level about the asset type + + + Returns + ------- + Dataframe with oemof result sequences's timestamps as columns as well as investment and a multi-index built automatically, see construct_multi_index_levels for more information on the multi-index + """ + mi_levels = [ + "bus", + "direction", + "asset", + ] + + busses_info = infer_busses_carrier(energy_system) + if bus_carrier is False: + busses_info = list(busses_info.keys()) + else: + mi_levels.append("carrier") + + if asset_type is True: + assets_info = infer_asset_types(energy_system) + mi_levels.append("facade_type") + else: + assets_info = None + + results = energy_system.results + + if isinstance(results, dict): + ts = [] + investments = [] + flows = [] + for x, res in solph.views.convert_keys_to_strings(results).items(): + if x[1] != "None": + col_name = res["sequences"].columns[0] + ts.append( + res["sequences"].rename( + columns={col_name: x, "variable_name": "timesteps"} + ) + ) + flows.append( + construct_multi_index_levels( + x, busses_info=busses_info, assets_info=assets_info + ) + ) + invest = None if res["scalars"].empty is True else res["scalars"].invest + + investments.append(invest) + ts_df = pd.concat(ts, axis=1, join="inner") + mindex = pd.MultiIndex.from_tuples(flows, names=mi_levels) + + df = pd.DataFrame( + data=ts_df.T.to_dict(orient="split")["data"], + index=mindex, + columns=ts_df.index, + ) + + df["investments"] = investments + df.sort_index(inplace=True) + + return df From 6029fafb20ecbc6dc35096ad4dfbb465ed0363ed Mon Sep 17 00:00:00 2001 From: "pierre-francois.duc" Date: Fri, 31 May 2024 16:21:43 +0200 Subject: [PATCH 02/20] Add the oemof results Dataframe to an OTP version of Calculator --- examples/scripts/compute.py | 4 +++- .../post_processing/post_processing.py | 19 ++++++++++++++----- 2 files changed, 17 insertions(+), 6 deletions(-) diff --git 
a/examples/scripts/compute.py b/examples/scripts/compute.py index 14697955..23e06f0c 100644 --- a/examples/scripts/compute.py +++ b/examples/scripts/compute.py @@ -1,5 +1,6 @@ import os from oemof.solph import EnergySystem, Model +from oemof.solph import processing from oemof.solph.processing import parameter_as_dict # TODO this should be with from oemof.tabular.datapackage import building when https://github.com/oemof/oemof-tabular/pull/173 is merged @@ -84,7 +85,8 @@ # extract parameters and results params = parameter_as_dict(es) results = m.results() + es.results = processing.results(m) - post_processing(params, results, results_path) + post_processing(params, es, results_path) print("done") diff --git a/src/oemof_tabular_plugins/general/post_processing/post_processing.py b/src/oemof_tabular_plugins/general/post_processing/post_processing.py index 76e624d5..afbd0b47 100644 --- a/src/oemof_tabular_plugins/general/post_processing/post_processing.py +++ b/src/oemof_tabular_plugins/general/post_processing/post_processing.py @@ -5,6 +5,9 @@ import warnings from oemof.tabular.postprocessing.core import Calculator from oemof.tabular.postprocessing import calculations as clc, naming +from oemof_tabular_plugins.datapackage.post_processing import ( + construct_dataframe_from_results, +) # ToDo: the functions below need proper testing and appropriate logging info for the user's understanding # NOTE: the post-processing module is expected to change once the main multi-index dataframe is created, so @@ -650,18 +653,24 @@ def create_costs_table(all_scalars, results, capacities_df, storage_capacities_d return costs_df -def post_processing(params, results, results_path): +class OTPCalculator(Calculator): + def __init__(self, input_parameters, energy_system): + self.df_results = construct_dataframe_from_results(energy_system) + super().__init__(input_parameters, energy_system.results) + + +def post_processing(params, es, results_path): # ToDo: adapt this function after multi-index dataframe is implemented to make it more concise / cleaner # ToDo: params can be accessed in results so will not need to be a separate argument """ The main post-processing function extracts various scalar and timeseries data and stores it in CSV files. 
:param params: energy system parameters - :param results: oemof model results + :param es: oemof energy_system with results in it, ie es.results = processing.results(m) has been performed :param results_path: results directory path """ # initiate calculator for post-processing - calculator = Calculator(params, results) - + calculator = OTPCalculator(params, es) + results = es.results # calculate scalars using functions from clc module aggregated_flows = clc.AggregatedFlows(calculator).result storage_losses = clc.StorageLosses(calculator).result @@ -768,4 +777,4 @@ def post_processing(params, results, results_path): # save the DataFrame to a CSV file costs_df.to_csv(filepath_name_costs, index=False) - return + return calculator From a1322065b4e7482f7b4e0a3d37e180280943fe0a Mon Sep 17 00:00:00 2001 From: "pierre-francois.duc" Date: Fri, 31 May 2024 16:47:56 +0200 Subject: [PATCH 03/20] Populate the MultiIndex DataFrame columns and start describing some calculations --- .../data/elements/conversion.csv | 2 +- .../data/elements/storage.csv | 2 +- .../data/elements/volatile.csv | 6 +- examples/scripts/compute.py | 6 +- .../datapackage/post_processing.py | 153 ++++++++++++++++++ .../post_processing/post_processing.py | 14 +- 6 files changed, 173 insertions(+), 10 deletions(-) diff --git a/examples/scenarios/general_add_cost_inputs/data/elements/conversion.csv b/examples/scenarios/general_add_cost_inputs/data/elements/conversion.csv index cd5e80a7..44f43383 100644 --- a/examples/scenarios/general_add_cost_inputs/data/elements/conversion.csv +++ b/examples/scenarios/general_add_cost_inputs/data/elements/conversion.csv @@ -1,2 +1,2 @@ name;type;carrier;tech;capacity;capacity_cost;marginal_cost;carrier_cost;efficiency;from_bus;to_bus;expandable;capex;opex_fix;lifetime -gas-pp;conversion;gas;ocgt;50;;1.5;0;0.48;gas-bus;elec-bus;False;975000;11625;20 +gas-pp;conversion;gas;ocgt;50;96629.94;1.5;0;0.48;gas-bus;elec-bus;False;975000;11625;20 diff --git a/examples/scenarios/general_add_cost_inputs/data/elements/storage.csv b/examples/scenarios/general_add_cost_inputs/data/elements/storage.csv index 7fece10a..ba09f510 100644 --- a/examples/scenarios/general_add_cost_inputs/data/elements/storage.csv +++ b/examples/scenarios/general_add_cost_inputs/data/elements/storage.csv @@ -1,2 +1,2 @@ name;carrier;tech;storage_capacity;storage_capacity_potential;capacity;efficiency;capacity_cost;storage_capacity_cost;marginal_cost;type;bus;invest_relation_output_capacity;invest_relation_input_output;expandable;capex;opex_fix;lifetime -battery-storage;lithium;battery;0;;0;0.92;0;;1.8;storage;elec-bus;1;1;True;338000;10140;15 +battery-storage;lithium;battery;0;;0;0.92;0;44941.41;1.8;storage;elec-bus;1;1;True;338000;10140;15 diff --git a/examples/scenarios/general_add_cost_inputs/data/elements/volatile.csv b/examples/scenarios/general_add_cost_inputs/data/elements/volatile.csv index 3747bf37..d825274e 100644 --- a/examples/scenarios/general_add_cost_inputs/data/elements/volatile.csv +++ b/examples/scenarios/general_add_cost_inputs/data/elements/volatile.csv @@ -1,4 +1,4 @@ name;type;carrier;tech;capacity;capacity_cost;bus;marginal_cost;profile;expandable;capex;opex_fix;lifetime -wind;volatile;wind;onshore;0.0;;elec-bus;0.0;wind-profile;True;1362500;27500;20 -pv;volatile;solar;pv;0.0;;elec-bus;0.0;pv-profile;True;1050000;10500;30 -hydro;volatile;hydro;hydro;0.0;;elec-bus;0.0;1;True;2150000;47300;40 +wind;volatile;wind;onshore;0.0;146288.96;elec-bus;0.0;wind-profile;True;1362500;27500;20 
+pv;volatile;solar;pv;0.0;86781.36;elec-bus;0.0;pv-profile;True;1050000;10500;30 +hydro;volatile;hydro;hydro;0.0;190192.3;elec-bus;0.0;1;True;2150000;47300;40 diff --git a/examples/scripts/compute.py b/examples/scripts/compute.py index 23e06f0c..30914789 100644 --- a/examples/scripts/compute.py +++ b/examples/scripts/compute.py @@ -26,7 +26,7 @@ # -------------- USER INPUTS -------------- # list of scenarios to be evaluated -scenarios = ["general_add_cost_inputs"] +scenarios = ["wefe_pv_panel"] # weighted average cost of capital (WACC) - might move later # this parameter is needed if CAPEX, OPEX fix and lifetime are included wacc = 0.06 @@ -87,6 +87,8 @@ results = m.results() es.results = processing.results(m) - post_processing(params, es, results_path) + post_processing( + params, es, results_path, dp_path=os.path.join(scenario_dir, "datapackage.json") + ) print("done") diff --git a/src/oemof_tabular_plugins/datapackage/post_processing.py b/src/oemof_tabular_plugins/datapackage/post_processing.py index a8c9bfc1..9b605215 100644 --- a/src/oemof_tabular_plugins/datapackage/post_processing.py +++ b/src/oemof_tabular_plugins/datapackage/post_processing.py @@ -1,7 +1,77 @@ import pandas as pd +from datapackage import Package import oemof.solph as solph +RAW_OUTPUTS = ["investments"] +PROCESSED_RAW_OUTPUTS = ["flow_min", "flow_max", "aggregated_flow"] +RAW_INPUTS = [ + "marginal_cost", + "carrier_cost", + "capacity_cost", + "storage_capacity_cost", + "capacity", + "expendable", + "storage_capacity", + "min_capacity", + "max_capacity", + "efficiency", + "renewable_factor", + "emission_factor", +] + + +def compute_renewable_share(results_df, argument_names, col_name): + _check_arguments(results_df, argument_names, col_name) + + +def compute_cO2_emissions(results_df, argument_names, col_name): + _check_arguments(results_df, argument_names, col_name) + + +def _check_arguments(series, index_names, col_name): + """Check that all required argument are present in the DataFrame slice""" + for arg in index_names: + if arg not in series.index: + raise AttributeError( + f"The column {arg} is not present within the results DataFrame and is required to compute '{col_name}', listed in the calculations to be executed" + ) + + +def compute_variable_costs(results_df, argument_names, col_name): + """TODO write a docstring here""" + _check_arguments(results_df, argument_names, col_name) + return results_df.aggregated_flow * ( + results_df.marginal_cost.astype("float") + + results_df.carrier_cost.astype("float") + ) + + +CALCULATED_OUTPUTS = [ + { + "column_name": "renewable_share", + "operation": compute_renewable_share, + "description": "The renewable share is computed from the flow and the renewable factor", + "argument_names": [ + "aggregated_flow", + "renewable_factor", + ], + }, + { + "column_name": "cO2_emmissions", + "operation": compute_cO2_emissions, + "description": "Compute the amount of CO2 emitted by each component of the system.", + "argument_names": ["aggregated_flow", "emission_factor", "tech"], + }, + { + "column_name": "variable_costs", + "operation": compute_variable_costs, + "description": "this output is calculated ...", + "argument_names": ["aggregated_flow", "marginal_cost", "carrier_cost"], + }, +] + + def infer_busses_carrier(energy_system): """Loop through the nodes of an energy system and infer the carrier of busses from them @@ -184,3 +254,86 @@ def construct_dataframe_from_results(energy_system, bus_carrier=True, asset_type df.sort_index(inplace=True) return df + + +def 
process_raw_results(df_results): + """Compute the min, max and aggregated flows for each asset-bus pair + + Parameters + ---------- + df_results: pandas DataFrame + the outcome of construct_dataframe_from_results() + + Returns + ------- + """ + temp = df_results[df_results.columns.difference(RAW_OUTPUTS)] + df_results["flow_min"] = temp.min(axis=1) + df_results["flow_max"] = temp.max(axis=1) + df_results["aggregated_flow"] = temp.sum(axis=1) + + +def process_raw_inputs(df_results, dp_path, raw_inputs=RAW_INPUTS): + """Find the input parameters from the datapackage.json file + + + Parameters + ---------- + df_results: pandas DataFrame + the outcome of construct_dataframe_from_results() + dp_path: string + path to the datapackage.json file + raw_inputs: list of string + list of parameters from the datapackage one would like to collect for result post-processing + + Returns + ------- + + """ + p = Package(dp_path) + inputs_df = None + for r in p.resources: + if "elements" in r.descriptor["path"] and r.name != "bus": + df = pd.DataFrame.from_records(r.read(keyed=True), index="name") + resource_inputs = df[list(set(raw_inputs).intersection(set(df.columns)))].T + + if inputs_df is None: + if not resource_inputs.empty: + inputs_df = resource_inputs + else: + inputs_df = inputs_df.join(resource_inputs) + + # append the inputs of the datapackage to the results DataFrame + inputs_df.T.index.name = "asset" + return df_results.join(inputs_df.T.apply(pd.to_numeric, downcast="float")) + + +def apply_calculations(results_df, calculations=CALCULATED_OUTPUTS): + """Apply calculation and populate the columns of the results_df + + Parameters + ---------- + df_results: pandas DataFrame + the outcome of process_raw_input() + calculations: dict + dict containing + "column_name" (the name of the new column within results_df), + "operation" (handle of a function which will be applied row-wise to results_df), + "description" (a string for documentation purposes) + and "argument_names" (list of columns needed within results_df) + + Returns + ------- + + """ + for calc in calculations: + var_name = calc["column_name"] + try: + results_df[var_name] = results_df.apply( + calc["operation"], + argument_names=calc["argument_names"], + col_name=var_name, + axis=1, + ) + except AttributeError as e: + print(e) diff --git a/src/oemof_tabular_plugins/general/post_processing/post_processing.py b/src/oemof_tabular_plugins/general/post_processing/post_processing.py index afbd0b47..f94f4758 100644 --- a/src/oemof_tabular_plugins/general/post_processing/post_processing.py +++ b/src/oemof_tabular_plugins/general/post_processing/post_processing.py @@ -7,6 +7,9 @@ from oemof.tabular.postprocessing import calculations as clc, naming from oemof_tabular_plugins.datapackage.post_processing import ( construct_dataframe_from_results, + process_raw_results, + process_raw_inputs, + apply_calculations, ) # ToDo: the functions below need proper testing and appropriate logging info for the user's understanding @@ -654,12 +657,15 @@ def create_costs_table(all_scalars, results, capacities_df, storage_capacities_d class OTPCalculator(Calculator): - def __init__(self, input_parameters, energy_system): + def __init__(self, input_parameters, energy_system, dp_path): self.df_results = construct_dataframe_from_results(energy_system) + process_raw_results(self.df_results) + self.df_results = process_raw_inputs(self.df_results, dp_path) + apply_calculations(self.df_results) super().__init__(input_parameters, energy_system.results) -def 
post_processing(params, es, results_path): +def post_processing(params, es, results_path, dp_path): # ToDo: adapt this function after multi-index dataframe is implemented to make it more concise / cleaner # ToDo: params can be accessed in results so will not need to be a separate argument """ @@ -667,9 +673,11 @@ def post_processing(params, es, results_path): :param params: energy system parameters :param es: oemof energy_system with results in it, ie es.results = processing.results(m) has been performed :param results_path: results directory path + :param dp_path: path to the datapackage.json file """ # initiate calculator for post-processing - calculator = OTPCalculator(params, es) + calculator = OTPCalculator(params, es, dp_path) + print(calculator.df_results) results = es.results # calculate scalars using functions from clc module aggregated_flows = clc.AggregatedFlows(calculator).result From 380df932e3c3ecefe4d1cec6350fb248bbbc68f8 Mon Sep 17 00:00:00 2001 From: "pierre-francois.duc" Date: Thu, 6 Jun 2024 10:45:29 +0200 Subject: [PATCH 04/20] Add validation of calculations and rearrange code a bit --- .../datapackage/post_processing.py | 74 +++++++++++++------ .../post_processing/post_processing.py | 2 +- 2 files changed, 53 insertions(+), 23 deletions(-) diff --git a/src/oemof_tabular_plugins/datapackage/post_processing.py b/src/oemof_tabular_plugins/datapackage/post_processing.py index 9b605215..2287af3e 100644 --- a/src/oemof_tabular_plugins/datapackage/post_processing.py +++ b/src/oemof_tabular_plugins/datapackage/post_processing.py @@ -1,3 +1,5 @@ +import logging + import pandas as pd from datapackage import Package import oemof.solph as solph @@ -21,32 +23,32 @@ ] -def compute_renewable_share(results_df, argument_names, col_name): - _check_arguments(results_df, argument_names, col_name) +def compute_renewable_share(results_df): + pass -def compute_cO2_emissions(results_df, argument_names, col_name): - _check_arguments(results_df, argument_names, col_name) +def compute_cO2_emissions(results_df): + pass -def _check_arguments(series, index_names, col_name): - """Check that all required argument are present in the DataFrame slice""" - for arg in index_names: - if arg not in series.index: +def _check_arguments(df, column_names, col_name): + """Check that all required argument are present in the DataFrame columns""" + for arg in column_names: + if arg not in df.columns: raise AttributeError( f"The column {arg} is not present within the results DataFrame and is required to compute '{col_name}', listed in the calculations to be executed" ) -def compute_variable_costs(results_df, argument_names, col_name): +def compute_variable_costs(results_df): """TODO write a docstring here""" - _check_arguments(results_df, argument_names, col_name) return results_df.aggregated_flow * ( - results_df.marginal_cost.astype("float") - + results_df.carrier_cost.astype("float") + results_df.marginal_cost # .astype("float") + + results_df.carrier_cost # .astype("float") ) +# TODO turn the dict into a class simular to the one of Calculation of oemof.tabular CALCULATED_OUTPUTS = [ { "column_name": "renewable_share", @@ -71,6 +73,30 @@ def compute_variable_costs(results_df, argument_names, col_name): }, ] +# Add docstrings from function handles for documentation purposes +for calc in CALCULATED_OUTPUTS: + func_handle = calc.get("operation", None) + if callable(func_handle): + calc["docstring"] = func_handle.__doc__ + else: + calc["docstring"] = "" + + +def _validate_calculation(calculation): + """Check if the 
parameters of a calculation are there and of the right format""" + var_name = calculation.get("column_name", None) + fhandle = calculation.get("operation", None) + + if var_name is None: + raise ValueError( + f"The 'column_name' under which the calculation should be saved in the results DataFrame is missing from the calculation dict: {calc}. Please check your input or look at help(apply_calculations) for the formatting of the calculation dict" + ) + + if not callable(fhandle): + raise ValueError( + f"The provided function handle for calculation of column '{var_name}' is not callable" + ) + def infer_busses_carrier(energy_system): """Loop through the nodes of an energy system and infer the carrier of busses from them @@ -271,6 +297,7 @@ def process_raw_results(df_results): df_results["flow_min"] = temp.min(axis=1) df_results["flow_max"] = temp.max(axis=1) df_results["aggregated_flow"] = temp.sum(axis=1) + return df_results def process_raw_inputs(df_results, dp_path, raw_inputs=RAW_INPUTS): @@ -315,8 +342,8 @@ def apply_calculations(results_df, calculations=CALCULATED_OUTPUTS): ---------- df_results: pandas DataFrame the outcome of process_raw_input() - calculations: dict - dict containing + calculations: list of dict + each dict should contain "column_name" (the name of the new column within results_df), "operation" (handle of a function which will be applied row-wise to results_df), "description" (a string for documentation purposes) @@ -327,13 +354,16 @@ def apply_calculations(results_df, calculations=CALCULATED_OUTPUTS): """ for calc in calculations: - var_name = calc["column_name"] + _validate_calculation(calc) + var_name = calc.get("column_name") + argument_names = calc.get("argument_names", []) + func_handle = calc.get("operation") try: - results_df[var_name] = results_df.apply( - calc["operation"], - argument_names=calc["argument_names"], - col_name=var_name, - axis=1, - ) + _check_arguments(results_df, column_names=argument_names, col_name=var_name) except AttributeError as e: - print(e) + logging.warning(e) + + results_df[var_name] = results_df.apply( + func_handle, + axis=1, + ) diff --git a/src/oemof_tabular_plugins/general/post_processing/post_processing.py b/src/oemof_tabular_plugins/general/post_processing/post_processing.py index f94f4758..69d1e1f7 100644 --- a/src/oemof_tabular_plugins/general/post_processing/post_processing.py +++ b/src/oemof_tabular_plugins/general/post_processing/post_processing.py @@ -659,7 +659,7 @@ def create_costs_table(all_scalars, results, capacities_df, storage_capacities_d class OTPCalculator(Calculator): def __init__(self, input_parameters, energy_system, dp_path): self.df_results = construct_dataframe_from_results(energy_system) - process_raw_results(self.df_results) + self.df_results = process_raw_results(self.df_results) self.df_results = process_raw_inputs(self.df_results, dp_path) apply_calculations(self.df_results) super().__init__(input_parameters, energy_system.results) From 96ece15a82e036ffef6f8988a492832e87e84ee4 Mon Sep 17 00:00:00 2001 From: "pierre-francois.duc" Date: Thu, 6 Jun 2024 12:11:19 +0200 Subject: [PATCH 05/20] Pseudo code for package validation for specific components --- examples/scripts/compute.py | 1 + .../datapackage/building.py | 14 ++++++++++++-- src/oemof_tabular_plugins/wefe/facades/mimo.py | 17 +++++++++++++++++ .../wefe/facades/pv_panel.py | 5 +++++ 4 files changed, 35 insertions(+), 2 deletions(-) create mode 100644 src/oemof_tabular_plugins/wefe/facades/mimo.py diff --git a/examples/scripts/compute.py 
b/examples/scripts/compute.py index 30914789..dc551e28 100644 --- a/examples/scripts/compute.py +++ b/examples/scripts/compute.py @@ -55,6 +55,7 @@ otp_building.infer_metadata_from_data( package_name=scenario, path=scenario_dir, + typemap=TYPEMAP, ) # create energy system object from the datapackage diff --git a/src/oemof_tabular_plugins/datapackage/building.py b/src/oemof_tabular_plugins/datapackage/building.py index ed72fa68..3e4ca42b 100644 --- a/src/oemof_tabular_plugins/datapackage/building.py +++ b/src/oemof_tabular_plugins/datapackage/building.py @@ -108,17 +108,21 @@ def infer_resource_foreign_keys(resource, sequences_profiles_to_resource, busses return r -def infer_package_foreign_keys(package): +def infer_package_foreign_keys(package, typemap=None): """Infer the foreign_keys from data/elements and data/sequences and update meta data Parameters ---------- package + typemap Returns ------- """ + if typemap is None: + typemap = {} + p = package sequences_profiles_to_resource = map_sequence_profiles_to_resource_name(p) @@ -133,6 +137,11 @@ def infer_package_foreign_keys(package): r = infer_resource_foreign_keys( r, sequences_profiles_to_resource, busses=bus_data.name.to_list() ) + + if r.name in typemap: + # TODO here test if facade_type has the method 'validate_datapackage' + r = typemap[r.name].validate_datapackage(r) + p.remove_resource(r.name) p.add_resource(r.descriptor) @@ -141,6 +150,7 @@ def infer_metadata_from_data( package_name="default-name", path=None, metadata_filename="datapackage.json", + typemap=None, ): """ @@ -185,7 +195,7 @@ def infer_resource_basic_foreign_keys(resource): # reload the package from the saved json file p = Package(os.path.join(path, metadata_filename)) - infer_package_foreign_keys(p) + infer_package_foreign_keys(p, typemap=typemap) p.descriptor["resources"].sort(key=lambda x: (x["path"], x["name"])) p.commit() p.save(os.path.join(path, metadata_filename)) diff --git a/src/oemof_tabular_plugins/wefe/facades/mimo.py b/src/oemof_tabular_plugins/wefe/facades/mimo.py new file mode 100644 index 00000000..5fa60422 --- /dev/null +++ b/src/oemof_tabular_plugins/wefe/facades/mimo.py @@ -0,0 +1,17 @@ +from oemof.industry import MimoFacade + + +def validate_mimo_datapackage(cls, resource): + # modify the resource (datapackage.resource) + # should it return the resource? + pass + + +def processing_mimo_raw_inputs(cls, resource, results_df): + # function to apply on df from above (drop the thee columns (conversion_factor_ac-elec-bus, conversion_factor_permeate-bus, conversion_factor_brine-bus) and turn them into one conversion_factor column), then add the primary column + + return results_df + + +MimoFacade.validate_datapackage = classmethod(validate_mimo_datapackage) +MimoFacade.processing_raw_inputs = classmethod(processing_mimo_raw_inputs) diff --git a/src/oemof_tabular_plugins/wefe/facades/pv_panel.py b/src/oemof_tabular_plugins/wefe/facades/pv_panel.py index b7f74cb9..beaa8e0e 100644 --- a/src/oemof_tabular_plugins/wefe/facades/pv_panel.py +++ b/src/oemof_tabular_plugins/wefe/facades/pv_panel.py @@ -149,3 +149,8 @@ def build_solph_components(self): ) } ) + + def validate_datapackage(self, resource): + # modify the resource (datapackage.resource) + # should it return the resource? 
+ pass From 2de97f756c0fe4ee013c1668018b1030f9fcc4d0 Mon Sep 17 00:00:00 2001 From: "pierre-francois.duc" Date: Thu, 6 Jun 2024 12:46:31 +0200 Subject: [PATCH 06/20] Provide a structure for post-processing the raw inputs of specific facades Adapt the MIMO Facade package validation method --- examples/scripts/compute.py | 5 +-- .../datapackage/building.py | 14 ++++---- .../datapackage/post_processing.py | 12 ++++++- .../wefe/facades/__init__.py | 1 + .../wefe/facades/mimo.py | 35 +++++++++++++++---- .../wefe/facades/pv_panel.py | 5 +++ 6 files changed, 56 insertions(+), 16 deletions(-) diff --git a/examples/scripts/compute.py b/examples/scripts/compute.py index dc551e28..459d92d8 100644 --- a/examples/scripts/compute.py +++ b/examples/scripts/compute.py @@ -17,7 +17,7 @@ pre_processing, logger, ) -from oemof_tabular_plugins.wefe.facades import PVPanel +from oemof_tabular_plugins.wefe.facades import PVPanel, MIMO # -------------- RELEVANT PATHS -------------- @@ -26,7 +26,7 @@ # -------------- USER INPUTS -------------- # list of scenarios to be evaluated -scenarios = ["wefe_pv_panel"] +scenarios = ["wefe_reverse_osmosis_test"] # weighted average cost of capital (WACC) - might move later # this parameter is needed if CAPEX, OPEX fix and lifetime are included wacc = 0.06 @@ -38,6 +38,7 @@ moo = False # add PV Panel (from oemof-tabular-plugins) to facades type map (from oemof-tabular) - might move later TYPEMAP["pv-panel"] = PVPanel +TYPEMAP["mimo"] = MIMO # -------------- RUNNING THE SCENARIOS -------------- for scenario in scenarios: diff --git a/src/oemof_tabular_plugins/datapackage/building.py b/src/oemof_tabular_plugins/datapackage/building.py index 3e4ca42b..167aee1c 100644 --- a/src/oemof_tabular_plugins/datapackage/building.py +++ b/src/oemof_tabular_plugins/datapackage/building.py @@ -113,11 +113,8 @@ def infer_package_foreign_keys(package, typemap=None): Parameters ---------- - package - typemap - - Returns - ------- + package: scenario datapackage + typemap: facade typemap """ if typemap is None: @@ -139,8 +136,11 @@ def infer_package_foreign_keys(package, typemap=None): ) if r.name in typemap: - # TODO here test if facade_type has the method 'validate_datapackage' - r = typemap[r.name].validate_datapackage(r) + facade_type = typemap[r.name] + # test if facade_type has the method 'validate_datapackage' + if hasattr(facade_type, "validate_datapackage"): + # apply the method if it exists + facade_type.validate_datapackage(r) p.remove_resource(r.name) p.add_resource(r.descriptor) diff --git a/src/oemof_tabular_plugins/datapackage/post_processing.py b/src/oemof_tabular_plugins/datapackage/post_processing.py index 2287af3e..64cb1e09 100644 --- a/src/oemof_tabular_plugins/datapackage/post_processing.py +++ b/src/oemof_tabular_plugins/datapackage/post_processing.py @@ -119,6 +119,9 @@ def infer_busses_carrier(energy_system): if hasattr(node, attribute): bus_label = getattr(node, attribute).label + print( + f"Node: {node}, Attribute: {attribute}, Bus Label: {bus_label}" + ) # Debug print if bus_label in busses_carrier: if busses_carrier[bus_label] != node.carrier: raise ValueError( @@ -300,7 +303,7 @@ def process_raw_results(df_results): return df_results -def process_raw_inputs(df_results, dp_path, raw_inputs=RAW_INPUTS): +def process_raw_inputs(df_results, dp_path, raw_inputs=RAW_INPUTS, typemap=None): """Find the input parameters from the datapackage.json file @@ -317,6 +320,9 @@ def process_raw_inputs(df_results, dp_path, raw_inputs=RAW_INPUTS): ------- """ + if typemap is None: + 
typemap = {} + p = Package(dp_path) inputs_df = None for r in p.resources: @@ -330,6 +336,10 @@ def process_raw_inputs(df_results, dp_path, raw_inputs=RAW_INPUTS): else: inputs_df = inputs_df.join(resource_inputs) + if r.name in typemap: + # TODO here test if facade_type has the method 'validate_datapackage' + inputs_df = typemap[r.name].processing_raw_inputs(r, inputs_df) + # append the inputs of the datapackage to the results DataFrame inputs_df.T.index.name = "asset" return df_results.join(inputs_df.T.apply(pd.to_numeric, downcast="float")) diff --git a/src/oemof_tabular_plugins/wefe/facades/__init__.py b/src/oemof_tabular_plugins/wefe/facades/__init__.py index 6626ef51..cb1e53a6 100644 --- a/src/oemof_tabular_plugins/wefe/facades/__init__.py +++ b/src/oemof_tabular_plugins/wefe/facades/__init__.py @@ -1 +1,2 @@ from .pv_panel import PVPanel +from .mimo import MIMO diff --git a/src/oemof_tabular_plugins/wefe/facades/mimo.py b/src/oemof_tabular_plugins/wefe/facades/mimo.py index 5fa60422..ab26d450 100644 --- a/src/oemof_tabular_plugins/wefe/facades/mimo.py +++ b/src/oemof_tabular_plugins/wefe/facades/mimo.py @@ -1,17 +1,40 @@ -from oemof.industry import MimoFacade +from oemof_industry.mimo_converter import MIMO def validate_mimo_datapackage(cls, resource): - # modify the resource (datapackage.resource) - # should it return the resource? + """ + This datapackage validation method is necessary for the MIMO converter because the + 'primary' field automatically gets updated to the foreign keys in the datapackage.json + by the infer_metadata_from_data, but it should not be interpreted as a foreign key. + This method removes it. + + :param cls: class instance + :param resource: the datapackage resource + """ + print("resource in validation: ", resource) + # check if the 'foreignKeys' field exists in the schema + if ( + "schema" in resource.descriptor + and "foreignKeys" in resource.descriptor["schema"] + ): + # loop through each foreign key + for foreign_key in resource.descriptor["schema"]["foreignKeys"]: + print(foreign_key) + if "primary" in foreign_key["fields"]: + print(foreign_key) + # remove the foreign_key regarding 'primary' from the resource + resource.descriptor["schema"]["foreignKeys"].remove(foreign_key) + break pass def processing_mimo_raw_inputs(cls, resource, results_df): - # function to apply on df from above (drop the thee columns (conversion_factor_ac-elec-bus, conversion_factor_permeate-bus, conversion_factor_brine-bus) and turn them into one conversion_factor column), then add the primary column + # function to apply on df from above (drop the thee columns (conversion_factor_ac-elec-bus, + # conversion_factor_permeate-bus, conversion_factor_brine-bus) and turn them into one + # conversion_factor column), then add the primary column return results_df -MimoFacade.validate_datapackage = classmethod(validate_mimo_datapackage) -MimoFacade.processing_raw_inputs = classmethod(processing_mimo_raw_inputs) +MIMO.validate_datapackage = classmethod(validate_mimo_datapackage) +MIMO.processing_raw_inputs = classmethod(processing_mimo_raw_inputs) diff --git a/src/oemof_tabular_plugins/wefe/facades/pv_panel.py b/src/oemof_tabular_plugins/wefe/facades/pv_panel.py index beaa8e0e..ee945ff9 100644 --- a/src/oemof_tabular_plugins/wefe/facades/pv_panel.py +++ b/src/oemof_tabular_plugins/wefe/facades/pv_panel.py @@ -150,6 +150,11 @@ def build_solph_components(self): } ) + def processing_raw_inputs(self, resource, results_df): + # function to apply on df from above + + return results_df + def 
validate_datapackage(self, resource): # modify the resource (datapackage.resource) # should it return the resource? From 05ab622b5255ea5212e2849247ea1dadb0ac733f Mon Sep 17 00:00:00 2001 From: Ciara Dunks Date: Fri, 7 Jun 2024 14:56:54 +0200 Subject: [PATCH 07/20] Raw inputs included in df_results and fix for carrier/multiple bus/MIMO issue --- .../data/elements/storage.csv | 2 +- .../data/elements/volatile.csv | 6 +- .../datapackage.json | 648 ++++++++++++++++++ .../datapackage/post_processing.py | 30 +- .../post_processing/post_processing.py | 15 +- 5 files changed, 682 insertions(+), 19 deletions(-) create mode 100644 examples/scenarios/general_custom_attributes/datapackage.json diff --git a/examples/scenarios/general_custom_attributes/data/elements/storage.csv b/examples/scenarios/general_custom_attributes/data/elements/storage.csv index 89f65805..743b89cf 100644 --- a/examples/scenarios/general_custom_attributes/data/elements/storage.csv +++ b/examples/scenarios/general_custom_attributes/data/elements/storage.csv @@ -1,2 +1,2 @@ name;carrier;tech;storage_capacity;storage_capacity_potential;capacity;efficiency;capacity_cost;storage_capacity_cost;marginal_cost;type;bus;invest_relation_output_capacity;invest_relation_input_output;expandable;emission_factor -battery-storage;lithium;battery;0;100;0;0.92;0;61553;1.8;storage;elec-bus;0.1;1;True;0 +battery-storage;electricity;battery;0;100;0;0.92;0;61553;1.8;storage;elec-bus;0.1;1;True;0 diff --git a/examples/scenarios/general_custom_attributes/data/elements/volatile.csv b/examples/scenarios/general_custom_attributes/data/elements/volatile.csv index a24033df..0f49ecca 100644 --- a/examples/scenarios/general_custom_attributes/data/elements/volatile.csv +++ b/examples/scenarios/general_custom_attributes/data/elements/volatile.csv @@ -1,4 +1,4 @@ name;type;carrier;tech;capacity;capacity_cost;bus;marginal_cost;profile;expandable;emission_factor;renewable_factor -wind;volatile;wind;onshore;0;161151;elec-bus;0;wind-profile;True;0;1 -pv;volatile;solar;pv;0;99405;elec-bus;0;pv-profile;True;0;1 -hydro;volatile;hydro;hydro;0;218011;elec-bus;0;1;True;0;1 +wind;volatile;electricity;onshore;0;161151;elec-bus;0;wind-profile;True;0;1 +pv;volatile;electricity;pv;0;99405;elec-bus;0;pv-profile;True;0;1 +hydro;volatile;electricity;hydro;0;218011;elec-bus;0;1;True;0;1 diff --git a/examples/scenarios/general_custom_attributes/datapackage.json b/examples/scenarios/general_custom_attributes/datapackage.json new file mode 100644 index 00000000..310fd29a --- /dev/null +++ b/examples/scenarios/general_custom_attributes/datapackage.json @@ -0,0 +1,648 @@ +{ + "profile": "tabular-data-package", + "name": "general_custom_attributes", + "oemof_tabular_version": "0.0.5", + "resources": [ + { + "path": "data/elements/bus.csv", + "profile": "tabular-data-resource", + "name": "bus", + "format": "csv", + "mediatype": "text/csv", + "encoding": "utf-8", + "schema": { + "fields": [ + { + "name": "name", + "type": "string", + "format": "default" + }, + { + "name": "type", + "type": "string", + "format": "default" + }, + { + "name": "balanced", + "type": "boolean", + "format": "default" + } + ], + "missingValues": [ + "" + ], + "primaryKey": "name", + "foreignKeys": [] + } + }, + { + "path": "data/elements/conversion.csv", + "profile": "tabular-data-resource", + "name": "conversion", + "format": "csv", + "mediatype": "text/csv", + "encoding": "utf-8", + "schema": { + "fields": [ + { + "name": "name", + "type": "string", + "format": "default" + }, + { + "name": "type", + "type": 
"string", + "format": "default" + }, + { + "name": "carrier", + "type": "string", + "format": "default" + }, + { + "name": "tech", + "type": "string", + "format": "default" + }, + { + "name": "capacity", + "type": "integer", + "format": "default" + }, + { + "name": "capacity_cost", + "type": "integer", + "format": "default" + }, + { + "name": "marginal_cost", + "type": "number", + "format": "default" + }, + { + "name": "carrier_cost", + "type": "number", + "format": "default" + }, + { + "name": "efficiency", + "type": "number", + "format": "default" + }, + { + "name": "from_bus", + "type": "string", + "format": "default" + }, + { + "name": "to_bus", + "type": "string", + "format": "default" + }, + { + "name": "expandable", + "type": "boolean", + "format": "default" + } + ], + "missingValues": [ + "" + ], + "primaryKey": "name", + "foreignKeys": [ + { + "fields": "from_bus", + "reference": { + "resource": "bus", + "fields": "name" + } + }, + { + "fields": "to_bus", + "reference": { + "resource": "bus", + "fields": "name" + } + } + ] + } + }, + { + "path": "data/elements/dispatchable.csv", + "profile": "tabular-data-resource", + "name": "dispatchable", + "format": "csv", + "mediatype": "text/csv", + "encoding": "utf-8", + "schema": { + "fields": [ + { + "name": "name", + "type": "string", + "format": "default" + }, + { + "name": "type", + "type": "string", + "format": "default" + }, + { + "name": "carrier", + "type": "string", + "format": "default" + }, + { + "name": "tech", + "type": "string", + "format": "default" + }, + { + "name": "capacity", + "type": "integer", + "format": "default" + }, + { + "name": "capacity_cost", + "type": "integer", + "format": "default" + }, + { + "name": "bus", + "type": "string", + "format": "default" + }, + { + "name": "marginal_cost", + "type": "integer", + "format": "default" + }, + { + "name": "carrier_cost", + "type": "integer", + "format": "default" + }, + { + "name": "profile", + "type": "integer", + "format": "default" + }, + { + "name": "expandable", + "type": "boolean", + "format": "default" + }, + { + "name": "emission_factor", + "type": "integer", + "format": "default" + }, + { + "name": "renewable_factor", + "type": "integer", + "format": "default" + }, + { + "name": "output_parameters", + "type": "object", + "format": "default" + } + ], + "missingValues": [ + "" + ], + "primaryKey": "name", + "foreignKeys": [ + { + "fields": "bus", + "reference": { + "resource": "bus", + "fields": "name" + } + } + ] + } + }, + { + "path": "data/elements/excess.csv", + "profile": "tabular-data-resource", + "name": "excess", + "format": "csv", + "mediatype": "text/csv", + "encoding": "utf-8", + "schema": { + "fields": [ + { + "name": "name", + "type": "string", + "format": "default" + }, + { + "name": "type", + "type": "string", + "format": "default" + }, + { + "name": "bus", + "type": "string", + "format": "default" + }, + { + "name": "marginal_cost", + "type": "integer", + "format": "default" + } + ], + "missingValues": [ + "" + ], + "primaryKey": "name", + "foreignKeys": [ + { + "fields": "bus", + "reference": { + "resource": "bus", + "fields": "name" + } + } + ] + } + }, + { + "path": "data/elements/load.csv", + "profile": "tabular-data-resource", + "name": "load", + "format": "csv", + "mediatype": "text/csv", + "encoding": "utf-8", + "schema": { + "fields": [ + { + "name": "name", + "type": "string", + "format": "default" + }, + { + "name": "amount", + "type": "integer", + "format": "default" + }, + { + "name": "profile", + "type": "string", + "format": 
"default" + }, + { + "name": "type", + "type": "string", + "format": "default" + }, + { + "name": "bus", + "type": "string", + "format": "default" + } + ], + "missingValues": [ + "" + ], + "primaryKey": "name", + "foreignKeys": [ + { + "fields": "bus", + "reference": { + "resource": "bus", + "fields": "name" + } + }, + { + "fields": "profile", + "reference": { + "resource": "load_profile" + } + } + ] + } + }, + { + "path": "data/elements/shortage.csv", + "profile": "tabular-data-resource", + "name": "shortage", + "format": "csv", + "mediatype": "text/csv", + "encoding": "utf-8", + "schema": { + "fields": [ + { + "name": "name", + "type": "string", + "format": "default" + }, + { + "name": "type", + "type": "string", + "format": "default" + }, + { + "name": "carrier", + "type": "string", + "format": "default" + }, + { + "name": "tech", + "type": "string", + "format": "default" + }, + { + "name": "bus", + "type": "string", + "format": "default" + }, + { + "name": "marginal_cost", + "type": "integer", + "format": "default" + }, + { + "name": "capacity", + "type": "integer", + "format": "default" + } + ], + "missingValues": [ + "" + ], + "primaryKey": "name", + "foreignKeys": [ + { + "fields": "bus", + "reference": { + "resource": "bus", + "fields": "name" + } + } + ] + } + }, + { + "path": "data/elements/storage.csv", + "profile": "tabular-data-resource", + "name": "storage", + "format": "csv", + "mediatype": "text/csv", + "encoding": "utf-8", + "schema": { + "fields": [ + { + "name": "name", + "type": "string", + "format": "default" + }, + { + "name": "carrier", + "type": "string", + "format": "default" + }, + { + "name": "tech", + "type": "string", + "format": "default" + }, + { + "name": "storage_capacity", + "type": "integer", + "format": "default" + }, + { + "name": "storage_capacity_potential", + "type": "integer", + "format": "default" + }, + { + "name": "capacity", + "type": "integer", + "format": "default" + }, + { + "name": "efficiency", + "type": "number", + "format": "default" + }, + { + "name": "capacity_cost", + "type": "integer", + "format": "default" + }, + { + "name": "storage_capacity_cost", + "type": "integer", + "format": "default" + }, + { + "name": "marginal_cost", + "type": "number", + "format": "default" + }, + { + "name": "type", + "type": "string", + "format": "default" + }, + { + "name": "bus", + "type": "string", + "format": "default" + }, + { + "name": "invest_relation_output_capacity", + "type": "number", + "format": "default" + }, + { + "name": "invest_relation_input_output", + "type": "integer", + "format": "default" + }, + { + "name": "expandable", + "type": "boolean", + "format": "default" + }, + { + "name": "emission_factor", + "type": "integer", + "format": "default" + }, + { + "name": "output_parameters", + "type": "object", + "format": "default" + } + ], + "missingValues": [ + "" + ], + "primaryKey": "name", + "foreignKeys": [ + { + "fields": "bus", + "reference": { + "resource": "bus", + "fields": "name" + } + } + ] + } + }, + { + "path": "data/elements/volatile.csv", + "profile": "tabular-data-resource", + "name": "volatile", + "format": "csv", + "mediatype": "text/csv", + "encoding": "utf-8", + "schema": { + "fields": [ + { + "name": "name", + "type": "string", + "format": "default" + }, + { + "name": "type", + "type": "string", + "format": "default" + }, + { + "name": "carrier", + "type": "string", + "format": "default" + }, + { + "name": "tech", + "type": "string", + "format": "default" + }, + { + "name": "capacity", + "type": "integer", + "format": 
"default" + }, + { + "name": "capacity_cost", + "type": "integer", + "format": "default" + }, + { + "name": "bus", + "type": "string", + "format": "default" + }, + { + "name": "marginal_cost", + "type": "integer", + "format": "default" + }, + { + "name": "profile", + "type": "string", + "format": "default" + }, + { + "name": "expandable", + "type": "boolean", + "format": "default" + }, + { + "name": "emission_factor", + "type": "integer", + "format": "default" + }, + { + "name": "renewable_factor", + "type": "integer", + "format": "default" + }, + { + "name": "output_parameters", + "type": "object", + "format": "default" + } + ], + "missingValues": [ + "" + ], + "primaryKey": "name", + "foreignKeys": [ + { + "fields": "bus", + "reference": { + "resource": "bus", + "fields": "name" + } + }, + { + "fields": "profile", + "reference": { + "resource": "volatile_profile" + } + } + ] + } + }, + { + "path": "data/sequences/load_profile.csv", + "profile": "tabular-data-resource", + "name": "load_profile", + "format": "csv", + "mediatype": "text/csv", + "encoding": "utf-8", + "schema": { + "fields": [ + { + "name": "timeindex", + "type": "datetime", + "format": "default" + }, + { + "name": "electricity-load-profile", + "type": "number", + "format": "default" + } + ], + "missingValues": [ + "" + ] + } + }, + { + "path": "data/sequences/volatile_profile.csv", + "profile": "tabular-data-resource", + "name": "volatile_profile", + "format": "csv", + "mediatype": "text/csv", + "encoding": "utf-8", + "schema": { + "fields": [ + { + "name": "timeindex", + "type": "datetime", + "format": "default" + }, + { + "name": "wind-profile", + "type": "number", + "format": "default" + }, + { + "name": "pv-profile", + "type": "number", + "format": "default" + } + ], + "missingValues": [ + "" + ] + } + } + ] +} diff --git a/src/oemof_tabular_plugins/datapackage/post_processing.py b/src/oemof_tabular_plugins/datapackage/post_processing.py index 64cb1e09..3e49b71c 100644 --- a/src/oemof_tabular_plugins/datapackage/post_processing.py +++ b/src/oemof_tabular_plugins/datapackage/post_processing.py @@ -115,15 +115,18 @@ def infer_busses_carrier(energy_system): for node in energy_system.nodes: if hasattr(node, "carrier"): - for attribute in ("bus", "from_bus"): + # quick fix to work for MIMO component + # ToDo: assign carrier to busses instead of components to avoid problems + for attribute in ("bus", "from_bus", "from_bus_0", "to_bus_1"): if hasattr(node, attribute): bus_label = getattr(node, attribute).label - print( - f"Node: {node}, Attribute: {attribute}, Bus Label: {bus_label}" - ) # Debug print if bus_label in busses_carrier: if busses_carrier[bus_label] != node.carrier: + print( + "busses carrier[bus label]", busses_carrier[bus_label] + ) + print("node.carrier: ", node.carrier) raise ValueError( f"Two different carriers ({busses_carrier[bus_label]}, {node.carrier}) are associated to the same bus '{bus_label}'" ) @@ -255,13 +258,17 @@ def construct_dataframe_from_results(energy_system, bus_carrier=True, asset_type investments = [] flows = [] for x, res in solph.views.convert_keys_to_strings(results).items(): - if x[1] != "None": + # filter out entries where the second element of the tuple is 'None' and ensure the + # tuple has exactly two elements + if x[1] != "None" and len(x) == 2: col_name = res["sequences"].columns[0] ts.append( res["sequences"].rename( columns={col_name: x, "variable_name": "timesteps"} ) ) + # here change this information for flow_tuple in ('mimo', 'in_group_0', '0') + print(x) flows.append( 
construct_multi_index_levels( x, busses_info=busses_info, assets_info=assets_info @@ -324,11 +331,14 @@ def process_raw_inputs(df_results, dp_path, raw_inputs=RAW_INPUTS, typemap=None) typemap = {} p = Package(dp_path) - inputs_df = None + # initialise inputs_df with raw inputs as indexes + inputs_df = pd.DataFrame(index=raw_inputs) + # inputs_df = None for r in p.resources: if "elements" in r.descriptor["path"] and r.name != "bus": df = pd.DataFrame.from_records(r.read(keyed=True), index="name") resource_inputs = df[list(set(raw_inputs).intersection(set(df.columns)))].T + print("resource inputs: ", resource_inputs) if inputs_df is None: if not resource_inputs.empty: @@ -336,10 +346,12 @@ def process_raw_inputs(df_results, dp_path, raw_inputs=RAW_INPUTS, typemap=None) else: inputs_df = inputs_df.join(resource_inputs) - if r.name in typemap: - # TODO here test if facade_type has the method 'validate_datapackage' - inputs_df = typemap[r.name].processing_raw_inputs(r, inputs_df) + # if r.name in typemap: + # TODO here test if facade_type has the method 'validate_datapackage' + # inputs_df = typemap[r.name].processing_raw_inputs(r, inputs_df) + # kick out the lines where all values are NaN + inputs_df = inputs_df.dropna(how="all") # append the inputs of the datapackage to the results DataFrame inputs_df.T.index.name = "asset" return df_results.join(inputs_df.T.apply(pd.to_numeric, downcast="float")) diff --git a/src/oemof_tabular_plugins/general/post_processing/post_processing.py b/src/oemof_tabular_plugins/general/post_processing/post_processing.py index 69d1e1f7..dc59237d 100644 --- a/src/oemof_tabular_plugins/general/post_processing/post_processing.py +++ b/src/oemof_tabular_plugins/general/post_processing/post_processing.py @@ -658,10 +658,13 @@ def create_costs_table(all_scalars, results, capacities_df, storage_capacities_d class OTPCalculator(Calculator): def __init__(self, input_parameters, energy_system, dp_path): - self.df_results = construct_dataframe_from_results(energy_system) - self.df_results = process_raw_results(self.df_results) - self.df_results = process_raw_inputs(self.df_results, dp_path) - apply_calculations(self.df_results) + try: + self.df_results = construct_dataframe_from_results(energy_system) + self.df_results = process_raw_results(self.df_results) + self.df_results = process_raw_inputs(self.df_results, dp_path) + apply_calculations(self.df_results) + except Exception as e: + print(e) super().__init__(input_parameters, energy_system.results) @@ -740,13 +743,13 @@ def post_processing(params, es, results_path, dp_path): kpi_variables = [ "specific_system_cost", "renewable_share", - "total_emissions", + # "total_emissions", "total_land_requirement", ] kpi_values = [ calculate_specific_system_cost(all_scalars, total_system_costs), calculate_renewable_share(results), - calculate_total_emissions(results), + # calculate_total_emissions(results), calculate_total_land_requirement(results, capacities_df, storage_capacities_df), ] # filter out None values From c9144eb5efa45f4c4035324297af5a0e31c57496 Mon Sep 17 00:00:00 2001 From: "ciara.dunks" Date: Thu, 13 Jun 2024 16:28:43 +0200 Subject: [PATCH 08/20] Update results dataframe calculations --- examples/scripts/compute.py | 10 +- .../datapackage/post_processing.py | 131 ++++++++++++++---- .../post_processing/post_processing.py | 20 +-- 3 files changed, 123 insertions(+), 38 deletions(-) diff --git a/examples/scripts/compute.py b/examples/scripts/compute.py index 459d92d8..40403438 100644 --- a/examples/scripts/compute.py +++ 
b/examples/scripts/compute.py @@ -26,14 +26,20 @@ # -------------- USER INPUTS -------------- # list of scenarios to be evaluated -scenarios = ["wefe_reverse_osmosis_test"] +scenarios = ["cp_nigeria_usungwe_example"] # weighted average cost of capital (WACC) - might move later # this parameter is needed if CAPEX, OPEX fix and lifetime are included wacc = 0.06 # -------------- ADDITIONAL FUNCTIONALITIES (OEMOF-TABULAR-PLUGINS) -------------- # include the custom attribute parameters to be included in the model -custom_attributes = ["emission_factor", "renewable_factor", "land_requirement"] +# this can be moved somewhere and included in a dict or something similar with all possible additional attributes +custom_attributes = [ + "emission_factor", + "renewable_factor", + "land_requirement_factor", + "water_footprint_factor", +] # set whether the multi-objective optimization should be performed moo = False # add PV Panel (from oemof-tabular-plugins) to facades type map (from oemof-tabular) - might move later diff --git a/src/oemof_tabular_plugins/datapackage/post_processing.py b/src/oemof_tabular_plugins/datapackage/post_processing.py index 3e49b71c..12343cb2 100644 --- a/src/oemof_tabular_plugins/datapackage/post_processing.py +++ b/src/oemof_tabular_plugins/datapackage/post_processing.py @@ -3,8 +3,15 @@ import pandas as pd from datapackage import Package import oemof.solph as solph +import numpy as np +# ToDo: check to see if the storage optimized input/output (invest_out) and +# optimized capacity (invest) are saved correctly +# ToDo: see if variable costs are provided as a raw output, and if not +# they should be calculated with: if a flow is into component, multiply flow by carrier cost +# and if the flow is out of component, multiply it by marginal cost +# ToDo: is another raw output from the results is investment costs? or does this have to be calculated? 
RAW_OUTPUTS = ["investments"] PROCESSED_RAW_OUTPUTS = ["flow_min", "flow_max", "aggregated_flow"] RAW_INPUTS = [ @@ -13,22 +20,74 @@ "capacity_cost", "storage_capacity_cost", "capacity", - "expendable", + "expandable", "storage_capacity", "min_capacity", "max_capacity", "efficiency", + "capex", + "opex_fix", + "lifetime", "renewable_factor", "emission_factor", + "land_requirement_factor", + "water_footprint_factor", ] -def compute_renewable_share(results_df): - pass +def compute_total_capacity(results_df): + # ToDo: check for storage where there is both capacity and storage capacity + """Calculates total capacity by adding existing capacity (capacity) to optimized capacity (investments)""" + return results_df.capacity + results_df.investments -def compute_cO2_emissions(results_df): - pass +def compute_upfront_investment_costs(results_df): + # ToDo: check for storage if investments is based on correct parameter + """Calculates investment costs by multiplying capex with optimized capacity (investments)""" + if "capex" not in results_df.index: + return None + else: + return results_df.capex * results_df.investments + + +def compute_renewable_generation(results_df): + """Calculates renewable generation by multiplying aggregated flow by renewable factor""" + if "renewable_factor" not in results_df.index: + return None + else: + return results_df.aggregated_flow * results_df.renewable_factor + + +def compute_co2_emissions(results_df): + """Calculates CO2 emissions by multiplying aggregated flow by emission factor""" + if "emission_factor" not in results_df.index: + return None + else: + return results_df.aggregated_flow * results_df.emission_factor + + +def compute_additional_land_requirement(results_df): + """Calculates land requirement needed for optimized capacities""" + if "land_requirement_factor" not in results_df.index: + return None + else: + return results_df.investments * results_df.land_requirement_factor + + +def compute_total_land_requirement(results_df): + """Calculates land requirement needed for total capacities""" + if "land_requirement_factor" not in results_df.index: + return None + else: + return results_df.total_capacity * results_df.land_requirement_factor + + +def compute_water_footprint(results_df): + """Calculates water footprint by multiplying aggregated flow by water footprint factor""" + if "water_footprint_factor" not in results_df.index: + return None + else: + return results_df.aggregated_flow * results_df.water_footprint_factor def _check_arguments(df, column_names, col_name): @@ -40,20 +99,27 @@ def _check_arguments(df, column_names, col_name): ) -def compute_variable_costs(results_df): - """TODO write a docstring here""" - return results_df.aggregated_flow * ( - results_df.marginal_cost # .astype("float") - + results_df.carrier_cost # .astype("float") - ) - - # TODO turn the dict into a class simular to the one of Calculation of oemof.tabular CALCULATED_OUTPUTS = [ { - "column_name": "renewable_share", - "operation": compute_renewable_share, - "description": "The renewable share is computed from the flow and the renewable factor", + "column_name": "total_capacity", + "operation": compute_total_capacity, + "description": "The total capacity is calculated by adding the optimized capacity (investments) " + "to the existing capacity (capacity)", + "argument_names": ["investments", "capacity"], + }, + { + "column_name": "upfront_investment_costs", + "operation": compute_upfront_investment_costs, + "description": "Upfront investment costs are calculated by multiplying 
the optimized capacity " + "by the CAPEX", + "argument_names": ["investments", "capex"], + }, + { + "column_name": "renewable_generation", + "operation": compute_renewable_generation, + "description": "The renewable generation for each component is computed from the flow and the " + "renewable factor.", "argument_names": [ "aggregated_flow", "renewable_factor", @@ -61,15 +127,21 @@ def compute_variable_costs(results_df): }, { "column_name": "cO2_emmissions", - "operation": compute_cO2_emissions, - "description": "Compute the amount of CO2 emitted by each component of the system.", - "argument_names": ["aggregated_flow", "emission_factor", "tech"], + "operation": compute_co2_emissions, + "description": "CO2 emissions are calculated from the flow and the emission factor.", + "argument_names": ["aggregated_flow", "emission_factor"], }, { - "column_name": "variable_costs", - "operation": compute_variable_costs, - "description": "this output is calculated ...", - "argument_names": ["aggregated_flow", "marginal_cost", "carrier_cost"], + "column_name": "additional_land_requirement", + "operation": compute_additional_land_requirement, + "description": "The additional land requirement calculates the land required for the optimized capacities.", + "argument_names": ["investments", "emission_factor"], + }, + { + "column_name": "total_land_requirement", + "operation": compute_total_land_requirement, + "description": "The total land requirement calculates the land required for the total capacities.", + "argument_names": ["total_capacity", "emission_factor"], }, ] @@ -268,14 +340,13 @@ def construct_dataframe_from_results(energy_system, bus_carrier=True, asset_type ) ) # here change this information for flow_tuple in ('mimo', 'in_group_0', '0') - print(x) flows.append( construct_multi_index_levels( x, busses_info=busses_info, assets_info=assets_info ) ) - invest = None if res["scalars"].empty is True else res["scalars"].invest + invest = None if res["scalars"].empty is True else res["scalars"].invest investments.append(invest) ts_df = pd.concat(ts, axis=1, join="inner") mindex = pd.MultiIndex.from_tuples(flows, names=mi_levels) @@ -338,14 +409,11 @@ def process_raw_inputs(df_results, dp_path, raw_inputs=RAW_INPUTS, typemap=None) if "elements" in r.descriptor["path"] and r.name != "bus": df = pd.DataFrame.from_records(r.read(keyed=True), index="name") resource_inputs = df[list(set(raw_inputs).intersection(set(df.columns)))].T - print("resource inputs: ", resource_inputs) - if inputs_df is None: if not resource_inputs.empty: inputs_df = resource_inputs else: inputs_df = inputs_df.join(resource_inputs) - # if r.name in typemap: # TODO here test if facade_type has the method 'validate_datapackage' # inputs_df = typemap[r.name].processing_raw_inputs(r, inputs_df) @@ -384,8 +452,15 @@ def apply_calculations(results_df, calculations=CALCULATED_OUTPUTS): _check_arguments(results_df, column_names=argument_names, col_name=var_name) except AttributeError as e: logging.warning(e) + continue results_df[var_name] = results_df.apply( func_handle, axis=1, ) + # check if the new column contains all None values and remove it if so + if results_df[var_name].isna().all(): + results_df.drop(columns=[var_name], inplace=True) + logging.info( + f"Removed column '{var_name}' because it contains all None values." 
+ ) diff --git a/src/oemof_tabular_plugins/general/post_processing/post_processing.py b/src/oemof_tabular_plugins/general/post_processing/post_processing.py index dc59237d..e3cd199d 100644 --- a/src/oemof_tabular_plugins/general/post_processing/post_processing.py +++ b/src/oemof_tabular_plugins/general/post_processing/post_processing.py @@ -658,13 +658,12 @@ def create_costs_table(all_scalars, results, capacities_df, storage_capacities_d class OTPCalculator(Calculator): def __init__(self, input_parameters, energy_system, dp_path): - try: - self.df_results = construct_dataframe_from_results(energy_system) - self.df_results = process_raw_results(self.df_results) - self.df_results = process_raw_inputs(self.df_results, dp_path) - apply_calculations(self.df_results) - except Exception as e: - print(e) + + self.df_results = construct_dataframe_from_results(energy_system) + self.df_results = process_raw_results(self.df_results) + self.df_results = process_raw_inputs(self.df_results, dp_path) + apply_calculations(self.df_results) + super().__init__(input_parameters, energy_system.results) @@ -680,8 +679,13 @@ def post_processing(params, es, results_path, dp_path): """ # initiate calculator for post-processing calculator = OTPCalculator(params, es, dp_path) - print(calculator.df_results) + # print(calculator.df_results) results = es.results + results_by_flow = calculator.df_results + results_by_flow.to_csv(results_path + "/results_by_flow.csv", index=True) + + # ----- OLD POST-PROCESSING - TO BE DELETED ONCE CERTAIN ----- + # calculate scalars using functions from clc module aggregated_flows = clc.AggregatedFlows(calculator).result storage_losses = clc.StorageLosses(calculator).result From 79ef354b71ff3e267a5ca0a08544a5f98c9648cc Mon Sep 17 00:00:00 2001 From: "ciara.dunks" Date: Thu, 13 Jun 2024 16:29:38 +0200 Subject: [PATCH 09/20] Adapted scenarios to work with new results formatting --- .../data/elements/storage.csv | 2 +- .../data/elements/volatile.csv | 6 +- .../general_basic/data/elements/storage.csv | 2 +- .../general_basic/data/elements/volatile.csv | 6 +- .../data/elements/shortage.csv | 2 - .../data/elements/storage.csv | 2 +- .../data/elements/volatile.csv | 6 +- .../data/elements/shortage.csv | 2 - .../datapackage.json | 648 ------------------ .../data/elements/bus.csv | 3 + .../data/elements/dispatchable.csv | 3 + .../data/elements/excess.csv | 2 + .../data/elements/load.csv | 2 + .../data/elements/pv_panel.csv | 2 + .../data/sequences/dispatchable_profile.csv | 25 + .../data/sequences/load_profile.csv | 25 + .../data/sequences/pv_panel_profile.csv | 0 .../wefe_custom_attributes/scripts/.gitkeep | 0 examples/scenarios/wefe_pv_panel/README.md | 5 - .../scenarios/wefe_reverse_osmosis/README.md | 5 - .../data/elements/bus.csv | 11 +- .../data/elements/conversion.csv | 5 +- .../data/elements/dispatchable.csv | 6 +- .../data/elements/excess.csv | 4 +- .../data/elements/load.csv | 5 +- .../data/elements/mimo.csv | 2 + .../data/elements/pv_panel.csv | 2 - .../data/elements/storage.csv | 5 +- .../data/elements/volatile.csv | 2 + .../data/sequences/dispatchable_profile.csv | 25 - .../data/sequences/load_profile.csv | 29 +- .../data/sequences/volatile_profile.csv | 4 + .../wefe_reverse_osmosis/datapackage.json | 450 ------------ 33 files changed, 104 insertions(+), 1194 deletions(-) delete mode 100644 examples/scenarios/general_constraints/data/elements/shortage.csv delete mode 100644 examples/scenarios/general_custom_attributes/data/elements/shortage.csv delete mode 100644 
examples/scenarios/general_custom_attributes/datapackage.json create mode 100644 examples/scenarios/wefe_custom_attributes/data/elements/bus.csv create mode 100644 examples/scenarios/wefe_custom_attributes/data/elements/dispatchable.csv create mode 100644 examples/scenarios/wefe_custom_attributes/data/elements/excess.csv create mode 100644 examples/scenarios/wefe_custom_attributes/data/elements/load.csv create mode 100644 examples/scenarios/wefe_custom_attributes/data/elements/pv_panel.csv create mode 100644 examples/scenarios/wefe_custom_attributes/data/sequences/dispatchable_profile.csv create mode 100644 examples/scenarios/wefe_custom_attributes/data/sequences/load_profile.csv rename examples/scenarios/{wefe_reverse_osmosis => wefe_custom_attributes}/data/sequences/pv_panel_profile.csv (100%) create mode 100644 examples/scenarios/wefe_custom_attributes/scripts/.gitkeep delete mode 100644 examples/scenarios/wefe_pv_panel/README.md delete mode 100644 examples/scenarios/wefe_reverse_osmosis/README.md create mode 100644 examples/scenarios/wefe_reverse_osmosis/data/elements/mimo.csv delete mode 100644 examples/scenarios/wefe_reverse_osmosis/data/elements/pv_panel.csv create mode 100644 examples/scenarios/wefe_reverse_osmosis/data/elements/volatile.csv delete mode 100644 examples/scenarios/wefe_reverse_osmosis/data/sequences/dispatchable_profile.csv create mode 100644 examples/scenarios/wefe_reverse_osmosis/data/sequences/volatile_profile.csv delete mode 100644 examples/scenarios/wefe_reverse_osmosis/datapackage.json diff --git a/examples/scenarios/general_add_cost_inputs/data/elements/storage.csv b/examples/scenarios/general_add_cost_inputs/data/elements/storage.csv index ba09f510..2e83b7b4 100644 --- a/examples/scenarios/general_add_cost_inputs/data/elements/storage.csv +++ b/examples/scenarios/general_add_cost_inputs/data/elements/storage.csv @@ -1,2 +1,2 @@ name;carrier;tech;storage_capacity;storage_capacity_potential;capacity;efficiency;capacity_cost;storage_capacity_cost;marginal_cost;type;bus;invest_relation_output_capacity;invest_relation_input_output;expandable;capex;opex_fix;lifetime -battery-storage;lithium;battery;0;;0;0.92;0;44941.41;1.8;storage;elec-bus;1;1;True;338000;10140;15 +battery-storage;electricity;battery;0;;0;0.92;0;44941.41;1.8;storage;elec-bus;1;1;True;338000;10140;15 diff --git a/examples/scenarios/general_add_cost_inputs/data/elements/volatile.csv b/examples/scenarios/general_add_cost_inputs/data/elements/volatile.csv index d825274e..172a40ac 100644 --- a/examples/scenarios/general_add_cost_inputs/data/elements/volatile.csv +++ b/examples/scenarios/general_add_cost_inputs/data/elements/volatile.csv @@ -1,4 +1,4 @@ name;type;carrier;tech;capacity;capacity_cost;bus;marginal_cost;profile;expandable;capex;opex_fix;lifetime -wind;volatile;wind;onshore;0.0;146288.96;elec-bus;0.0;wind-profile;True;1362500;27500;20 -pv;volatile;solar;pv;0.0;86781.36;elec-bus;0.0;pv-profile;True;1050000;10500;30 -hydro;volatile;hydro;hydro;0.0;190192.3;elec-bus;0.0;1;True;2150000;47300;40 +wind;volatile;electricity;onshore;0.0;146288.96;elec-bus;0.0;wind-profile;True;1362500;27500;20 +pv;volatile;electricity;pv;0.0;86781.36;elec-bus;0.0;pv-profile;True;1050000;10500;30 +hydro;volatile;electricity;hydro;0.0;190192.3;elec-bus;0.0;1;True;2150000;47300;40 diff --git a/examples/scenarios/general_basic/data/elements/storage.csv b/examples/scenarios/general_basic/data/elements/storage.csv index 6f5da27f..e5a57c6c 100644 --- a/examples/scenarios/general_basic/data/elements/storage.csv +++ 
b/examples/scenarios/general_basic/data/elements/storage.csv @@ -1,2 +1,2 @@ name;carrier;tech;storage_capacity;storage_capacity_potential;capacity;efficiency;capacity_cost;storage_capacity_cost;marginal_cost;type;bus;invest_relation_output_capacity;invest_relation_input_output;expandable -battery-storage;lithium;battery;0;100;0;0.92;0;61553;1.8;storage;elec-bus;1;1;True +battery-storage;electricity;battery;0;100;0;0.92;0;61553;1.8;storage;elec-bus;1;1;True diff --git a/examples/scenarios/general_basic/data/elements/volatile.csv b/examples/scenarios/general_basic/data/elements/volatile.csv index 87bb0ce8..82278334 100644 --- a/examples/scenarios/general_basic/data/elements/volatile.csv +++ b/examples/scenarios/general_basic/data/elements/volatile.csv @@ -1,4 +1,4 @@ name;type;carrier;tech;capacity;capacity_cost;bus;marginal_cost;profile;expandable -wind;volatile;wind;onshore;0;161151;elec-bus;0;wind-profile;True -pv;volatile;solar;pv;0;99405;elec-bus;0;pv-profile;True -hydro;volatile;hydro;hydro;0;218011;elec-bus;0;1;True +wind;volatile;electricity;onshore;0;161151;elec-bus;0;wind-profile;True +pv;volatile;electricity;pv;0;99405;elec-bus;0;pv-profile;True +hydro;volatile;electricity;hydro;0;218011;elec-bus;0;1;True diff --git a/examples/scenarios/general_constraints/data/elements/shortage.csv b/examples/scenarios/general_constraints/data/elements/shortage.csv deleted file mode 100644 index fe2faf97..00000000 --- a/examples/scenarios/general_constraints/data/elements/shortage.csv +++ /dev/null @@ -1,2 +0,0 @@ -name;type;carrier;tech;bus;marginal_cost;capacity -elec-shortage;shortage;electricity;shortage;elec-bus;100000;0 diff --git a/examples/scenarios/general_constraints/data/elements/storage.csv b/examples/scenarios/general_constraints/data/elements/storage.csv index 89f65805..743b89cf 100644 --- a/examples/scenarios/general_constraints/data/elements/storage.csv +++ b/examples/scenarios/general_constraints/data/elements/storage.csv @@ -1,2 +1,2 @@ name;carrier;tech;storage_capacity;storage_capacity_potential;capacity;efficiency;capacity_cost;storage_capacity_cost;marginal_cost;type;bus;invest_relation_output_capacity;invest_relation_input_output;expandable;emission_factor -battery-storage;lithium;battery;0;100;0;0.92;0;61553;1.8;storage;elec-bus;0.1;1;True;0 +battery-storage;electricity;battery;0;100;0;0.92;0;61553;1.8;storage;elec-bus;0.1;1;True;0 diff --git a/examples/scenarios/general_constraints/data/elements/volatile.csv b/examples/scenarios/general_constraints/data/elements/volatile.csv index a24033df..0f49ecca 100644 --- a/examples/scenarios/general_constraints/data/elements/volatile.csv +++ b/examples/scenarios/general_constraints/data/elements/volatile.csv @@ -1,4 +1,4 @@ name;type;carrier;tech;capacity;capacity_cost;bus;marginal_cost;profile;expandable;emission_factor;renewable_factor -wind;volatile;wind;onshore;0;161151;elec-bus;0;wind-profile;True;0;1 -pv;volatile;solar;pv;0;99405;elec-bus;0;pv-profile;True;0;1 -hydro;volatile;hydro;hydro;0;218011;elec-bus;0;1;True;0;1 +wind;volatile;electricity;onshore;0;161151;elec-bus;0;wind-profile;True;0;1 +pv;volatile;electricity;pv;0;99405;elec-bus;0;pv-profile;True;0;1 +hydro;volatile;electricity;hydro;0;218011;elec-bus;0;1;True;0;1 diff --git a/examples/scenarios/general_custom_attributes/data/elements/shortage.csv b/examples/scenarios/general_custom_attributes/data/elements/shortage.csv deleted file mode 100644 index fe2faf97..00000000 --- a/examples/scenarios/general_custom_attributes/data/elements/shortage.csv +++ /dev/null @@ -1,2 
+0,0 @@ -name;type;carrier;tech;bus;marginal_cost;capacity -elec-shortage;shortage;electricity;shortage;elec-bus;100000;0 diff --git a/examples/scenarios/general_custom_attributes/datapackage.json b/examples/scenarios/general_custom_attributes/datapackage.json deleted file mode 100644 index 310fd29a..00000000 --- a/examples/scenarios/general_custom_attributes/datapackage.json +++ /dev/null @@ -1,648 +0,0 @@ -{ - "profile": "tabular-data-package", - "name": "general_custom_attributes", - "oemof_tabular_version": "0.0.5", - "resources": [ - { - "path": "data/elements/bus.csv", - "profile": "tabular-data-resource", - "name": "bus", - "format": "csv", - "mediatype": "text/csv", - "encoding": "utf-8", - "schema": { - "fields": [ - { - "name": "name", - "type": "string", - "format": "default" - }, - { - "name": "type", - "type": "string", - "format": "default" - }, - { - "name": "balanced", - "type": "boolean", - "format": "default" - } - ], - "missingValues": [ - "" - ], - "primaryKey": "name", - "foreignKeys": [] - } - }, - { - "path": "data/elements/conversion.csv", - "profile": "tabular-data-resource", - "name": "conversion", - "format": "csv", - "mediatype": "text/csv", - "encoding": "utf-8", - "schema": { - "fields": [ - { - "name": "name", - "type": "string", - "format": "default" - }, - { - "name": "type", - "type": "string", - "format": "default" - }, - { - "name": "carrier", - "type": "string", - "format": "default" - }, - { - "name": "tech", - "type": "string", - "format": "default" - }, - { - "name": "capacity", - "type": "integer", - "format": "default" - }, - { - "name": "capacity_cost", - "type": "integer", - "format": "default" - }, - { - "name": "marginal_cost", - "type": "number", - "format": "default" - }, - { - "name": "carrier_cost", - "type": "number", - "format": "default" - }, - { - "name": "efficiency", - "type": "number", - "format": "default" - }, - { - "name": "from_bus", - "type": "string", - "format": "default" - }, - { - "name": "to_bus", - "type": "string", - "format": "default" - }, - { - "name": "expandable", - "type": "boolean", - "format": "default" - } - ], - "missingValues": [ - "" - ], - "primaryKey": "name", - "foreignKeys": [ - { - "fields": "from_bus", - "reference": { - "resource": "bus", - "fields": "name" - } - }, - { - "fields": "to_bus", - "reference": { - "resource": "bus", - "fields": "name" - } - } - ] - } - }, - { - "path": "data/elements/dispatchable.csv", - "profile": "tabular-data-resource", - "name": "dispatchable", - "format": "csv", - "mediatype": "text/csv", - "encoding": "utf-8", - "schema": { - "fields": [ - { - "name": "name", - "type": "string", - "format": "default" - }, - { - "name": "type", - "type": "string", - "format": "default" - }, - { - "name": "carrier", - "type": "string", - "format": "default" - }, - { - "name": "tech", - "type": "string", - "format": "default" - }, - { - "name": "capacity", - "type": "integer", - "format": "default" - }, - { - "name": "capacity_cost", - "type": "integer", - "format": "default" - }, - { - "name": "bus", - "type": "string", - "format": "default" - }, - { - "name": "marginal_cost", - "type": "integer", - "format": "default" - }, - { - "name": "carrier_cost", - "type": "integer", - "format": "default" - }, - { - "name": "profile", - "type": "integer", - "format": "default" - }, - { - "name": "expandable", - "type": "boolean", - "format": "default" - }, - { - "name": "emission_factor", - "type": "integer", - "format": "default" - }, - { - "name": "renewable_factor", - "type": "integer", - 
"format": "default" - }, - { - "name": "output_parameters", - "type": "object", - "format": "default" - } - ], - "missingValues": [ - "" - ], - "primaryKey": "name", - "foreignKeys": [ - { - "fields": "bus", - "reference": { - "resource": "bus", - "fields": "name" - } - } - ] - } - }, - { - "path": "data/elements/excess.csv", - "profile": "tabular-data-resource", - "name": "excess", - "format": "csv", - "mediatype": "text/csv", - "encoding": "utf-8", - "schema": { - "fields": [ - { - "name": "name", - "type": "string", - "format": "default" - }, - { - "name": "type", - "type": "string", - "format": "default" - }, - { - "name": "bus", - "type": "string", - "format": "default" - }, - { - "name": "marginal_cost", - "type": "integer", - "format": "default" - } - ], - "missingValues": [ - "" - ], - "primaryKey": "name", - "foreignKeys": [ - { - "fields": "bus", - "reference": { - "resource": "bus", - "fields": "name" - } - } - ] - } - }, - { - "path": "data/elements/load.csv", - "profile": "tabular-data-resource", - "name": "load", - "format": "csv", - "mediatype": "text/csv", - "encoding": "utf-8", - "schema": { - "fields": [ - { - "name": "name", - "type": "string", - "format": "default" - }, - { - "name": "amount", - "type": "integer", - "format": "default" - }, - { - "name": "profile", - "type": "string", - "format": "default" - }, - { - "name": "type", - "type": "string", - "format": "default" - }, - { - "name": "bus", - "type": "string", - "format": "default" - } - ], - "missingValues": [ - "" - ], - "primaryKey": "name", - "foreignKeys": [ - { - "fields": "bus", - "reference": { - "resource": "bus", - "fields": "name" - } - }, - { - "fields": "profile", - "reference": { - "resource": "load_profile" - } - } - ] - } - }, - { - "path": "data/elements/shortage.csv", - "profile": "tabular-data-resource", - "name": "shortage", - "format": "csv", - "mediatype": "text/csv", - "encoding": "utf-8", - "schema": { - "fields": [ - { - "name": "name", - "type": "string", - "format": "default" - }, - { - "name": "type", - "type": "string", - "format": "default" - }, - { - "name": "carrier", - "type": "string", - "format": "default" - }, - { - "name": "tech", - "type": "string", - "format": "default" - }, - { - "name": "bus", - "type": "string", - "format": "default" - }, - { - "name": "marginal_cost", - "type": "integer", - "format": "default" - }, - { - "name": "capacity", - "type": "integer", - "format": "default" - } - ], - "missingValues": [ - "" - ], - "primaryKey": "name", - "foreignKeys": [ - { - "fields": "bus", - "reference": { - "resource": "bus", - "fields": "name" - } - } - ] - } - }, - { - "path": "data/elements/storage.csv", - "profile": "tabular-data-resource", - "name": "storage", - "format": "csv", - "mediatype": "text/csv", - "encoding": "utf-8", - "schema": { - "fields": [ - { - "name": "name", - "type": "string", - "format": "default" - }, - { - "name": "carrier", - "type": "string", - "format": "default" - }, - { - "name": "tech", - "type": "string", - "format": "default" - }, - { - "name": "storage_capacity", - "type": "integer", - "format": "default" - }, - { - "name": "storage_capacity_potential", - "type": "integer", - "format": "default" - }, - { - "name": "capacity", - "type": "integer", - "format": "default" - }, - { - "name": "efficiency", - "type": "number", - "format": "default" - }, - { - "name": "capacity_cost", - "type": "integer", - "format": "default" - }, - { - "name": "storage_capacity_cost", - "type": "integer", - "format": "default" - }, - { - "name": 
"marginal_cost", - "type": "number", - "format": "default" - }, - { - "name": "type", - "type": "string", - "format": "default" - }, - { - "name": "bus", - "type": "string", - "format": "default" - }, - { - "name": "invest_relation_output_capacity", - "type": "number", - "format": "default" - }, - { - "name": "invest_relation_input_output", - "type": "integer", - "format": "default" - }, - { - "name": "expandable", - "type": "boolean", - "format": "default" - }, - { - "name": "emission_factor", - "type": "integer", - "format": "default" - }, - { - "name": "output_parameters", - "type": "object", - "format": "default" - } - ], - "missingValues": [ - "" - ], - "primaryKey": "name", - "foreignKeys": [ - { - "fields": "bus", - "reference": { - "resource": "bus", - "fields": "name" - } - } - ] - } - }, - { - "path": "data/elements/volatile.csv", - "profile": "tabular-data-resource", - "name": "volatile", - "format": "csv", - "mediatype": "text/csv", - "encoding": "utf-8", - "schema": { - "fields": [ - { - "name": "name", - "type": "string", - "format": "default" - }, - { - "name": "type", - "type": "string", - "format": "default" - }, - { - "name": "carrier", - "type": "string", - "format": "default" - }, - { - "name": "tech", - "type": "string", - "format": "default" - }, - { - "name": "capacity", - "type": "integer", - "format": "default" - }, - { - "name": "capacity_cost", - "type": "integer", - "format": "default" - }, - { - "name": "bus", - "type": "string", - "format": "default" - }, - { - "name": "marginal_cost", - "type": "integer", - "format": "default" - }, - { - "name": "profile", - "type": "string", - "format": "default" - }, - { - "name": "expandable", - "type": "boolean", - "format": "default" - }, - { - "name": "emission_factor", - "type": "integer", - "format": "default" - }, - { - "name": "renewable_factor", - "type": "integer", - "format": "default" - }, - { - "name": "output_parameters", - "type": "object", - "format": "default" - } - ], - "missingValues": [ - "" - ], - "primaryKey": "name", - "foreignKeys": [ - { - "fields": "bus", - "reference": { - "resource": "bus", - "fields": "name" - } - }, - { - "fields": "profile", - "reference": { - "resource": "volatile_profile" - } - } - ] - } - }, - { - "path": "data/sequences/load_profile.csv", - "profile": "tabular-data-resource", - "name": "load_profile", - "format": "csv", - "mediatype": "text/csv", - "encoding": "utf-8", - "schema": { - "fields": [ - { - "name": "timeindex", - "type": "datetime", - "format": "default" - }, - { - "name": "electricity-load-profile", - "type": "number", - "format": "default" - } - ], - "missingValues": [ - "" - ] - } - }, - { - "path": "data/sequences/volatile_profile.csv", - "profile": "tabular-data-resource", - "name": "volatile_profile", - "format": "csv", - "mediatype": "text/csv", - "encoding": "utf-8", - "schema": { - "fields": [ - { - "name": "timeindex", - "type": "datetime", - "format": "default" - }, - { - "name": "wind-profile", - "type": "number", - "format": "default" - }, - { - "name": "pv-profile", - "type": "number", - "format": "default" - } - ], - "missingValues": [ - "" - ] - } - } - ] -} diff --git a/examples/scenarios/wefe_custom_attributes/data/elements/bus.csv b/examples/scenarios/wefe_custom_attributes/data/elements/bus.csv new file mode 100644 index 00000000..e1a09d7d --- /dev/null +++ b/examples/scenarios/wefe_custom_attributes/data/elements/bus.csv @@ -0,0 +1,3 @@ +name;type;balanced +solar-energy-bus;bus;True +elec-bus;bus;True diff --git 
a/examples/scenarios/wefe_custom_attributes/data/elements/dispatchable.csv b/examples/scenarios/wefe_custom_attributes/data/elements/dispatchable.csv new file mode 100644 index 00000000..6644d42d --- /dev/null +++ b/examples/scenarios/wefe_custom_attributes/data/elements/dispatchable.csv @@ -0,0 +1,3 @@ +name;type;carrier;tech;capacity;capacity_cost;bus;marginal_cost;carrier_cost;profile;expandable;renewable_factor;emission_factor +solar-radiation;dispatchable;solar-energy;source;0;0;solar-energy-bus;0.0;0;ghi-profile;True;; +back-up-elec;dispatchable;electricity;source;0;0;elec-bus;0.5;0;1;True;0.0;0.39 diff --git a/examples/scenarios/wefe_custom_attributes/data/elements/excess.csv b/examples/scenarios/wefe_custom_attributes/data/elements/excess.csv new file mode 100644 index 00000000..8f7a7fb9 --- /dev/null +++ b/examples/scenarios/wefe_custom_attributes/data/elements/excess.csv @@ -0,0 +1,2 @@ +name;type;bus;marginal_cost +electricity-excess;excess;elec-bus;0 diff --git a/examples/scenarios/wefe_custom_attributes/data/elements/load.csv b/examples/scenarios/wefe_custom_attributes/data/elements/load.csv new file mode 100644 index 00000000..f45090ab --- /dev/null +++ b/examples/scenarios/wefe_custom_attributes/data/elements/load.csv @@ -0,0 +1,2 @@ +name;amount;profile;type;bus +demand;1;electricity-load-profile;load;elec-bus diff --git a/examples/scenarios/wefe_custom_attributes/data/elements/pv_panel.csv b/examples/scenarios/wefe_custom_attributes/data/elements/pv_panel.csv new file mode 100644 index 00000000..1047b6fe --- /dev/null +++ b/examples/scenarios/wefe_custom_attributes/data/elements/pv_panel.csv @@ -0,0 +1,2 @@ +name;type;carrier;tech;capacity;capacity_cost;marginal_cost;carrier_cost;from_bus;to_bus;t_air;ghi;p_rpv;r_ref;n_t;t_c_ref;noct;expandable;renewable_factor;emission_factor;land_requirement_factor;water_footprint_factor +pv-panel;pv-panel;solar-energy;pv;0;0;0;0;solar-energy-bus;elec-bus;t-air;ghi;270;1000;-0.0037;25;48;False;1;0;10;5 diff --git a/examples/scenarios/wefe_custom_attributes/data/sequences/dispatchable_profile.csv b/examples/scenarios/wefe_custom_attributes/data/sequences/dispatchable_profile.csv new file mode 100644 index 00000000..a5b7ff5c --- /dev/null +++ b/examples/scenarios/wefe_custom_attributes/data/sequences/dispatchable_profile.csv @@ -0,0 +1,25 @@ +timeindex,ghi-profile,elec-profile +2019-01-01T00:00:00Z,3.47E-05,1 +2019-01-01T01:00:00Z,3.47E-05,1 +2019-01-01T02:00:00Z,3.47E-05,1 +2019-01-01T03:00:00Z,3.47E-05,1 +2019-01-01T04:00:00Z,3.47E-05,1 +2019-01-01T05:00:00Z,3.47E-05,1 +2019-01-01T06:00:00Z,3.47E-05,1 +2019-01-01T07:00:00Z,3.47E-05,1 +2019-01-01T08:00:00Z,17.453646,1 +2019-01-01T09:00:00Z,105.80229,1 +2019-01-01T10:00:00Z,213.95938,1 +2019-01-01T11:00:00Z,279.92303,1 +2019-01-01T12:00:00Z,305.4247,1 +2019-01-01T13:00:00Z,277.91446,1 +2019-01-01T14:00:00Z,208.47395,1 +2019-01-01T15:00:00Z,103.83528,1 +2019-01-01T16:00:00Z,13.256493,1 +2019-01-01T17:00:00Z,3.47E-05,1 +2019-01-01T18:00:00Z,3.47E-05,1 +2019-01-01T19:00:00Z,3.47E-05,1 +2019-01-01T20:00:00Z,3.47E-05,1 +2019-01-01T21:00:00Z,3.47E-05,1 +2019-01-01T22:00:00Z,3.47E-05,1 +2019-01-01T23:00:00Z,3.47E-05,1 diff --git a/examples/scenarios/wefe_custom_attributes/data/sequences/load_profile.csv b/examples/scenarios/wefe_custom_attributes/data/sequences/load_profile.csv new file mode 100644 index 00000000..a46558d9 --- /dev/null +++ b/examples/scenarios/wefe_custom_attributes/data/sequences/load_profile.csv @@ -0,0 +1,25 @@ +timeindex,electricity-load-profile 
+2019-01-01T00:00:00Z,0.000101296 +2019-01-01T01:00:00Z,9.94E-05 +2019-01-01T02:00:00Z,9.94E-05 +2019-01-01T03:00:00Z,9.75E-05 +2019-01-01T04:00:00Z,0.000103208 +2019-01-01T05:00:00Z,0.000114675 +2019-01-01T06:00:00Z,0.000131876 +2019-01-01T07:00:00Z,0.000131876 +2019-01-01T08:00:00Z,0.00012232 +2019-01-01T09:00:00Z,0.000116586 +2019-01-01T10:00:00Z,0.000114675 +2019-01-01T11:00:00Z,0.000114675 +2019-01-01T12:00:00Z,0.000114675 +2019-01-01T13:00:00Z,0.000114675 +2019-01-01T14:00:00Z,0.000114675 +2019-01-01T15:00:00Z,0.000116586 +2019-01-01T16:00:00Z,0.000128054 +2019-01-01T17:00:00Z,0.000139521 +2019-01-01T18:00:00Z,0.000141433 +2019-01-01T19:00:00Z,0.000139521 +2019-01-01T20:00:00Z,0.000141433 +2019-01-01T21:00:00Z,0.000139521 +2019-01-01T22:00:00Z,0.000128054 +2019-01-01T23:00:00Z,0.000114675 diff --git a/examples/scenarios/wefe_reverse_osmosis/data/sequences/pv_panel_profile.csv b/examples/scenarios/wefe_custom_attributes/data/sequences/pv_panel_profile.csv similarity index 100% rename from examples/scenarios/wefe_reverse_osmosis/data/sequences/pv_panel_profile.csv rename to examples/scenarios/wefe_custom_attributes/data/sequences/pv_panel_profile.csv diff --git a/examples/scenarios/wefe_custom_attributes/scripts/.gitkeep b/examples/scenarios/wefe_custom_attributes/scripts/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/examples/scenarios/wefe_pv_panel/README.md b/examples/scenarios/wefe_pv_panel/README.md deleted file mode 100644 index be203d7c..00000000 --- a/examples/scenarios/wefe_pv_panel/README.md +++ /dev/null @@ -1,5 +0,0 @@ -# Dispatch example for oemof-tabular - -Run `scripts/infer.py` from the datapackage root directory to add the -meta data file `datapackage.json` after updating the resources of the -datapackage. diff --git a/examples/scenarios/wefe_reverse_osmosis/README.md b/examples/scenarios/wefe_reverse_osmosis/README.md deleted file mode 100644 index be203d7c..00000000 --- a/examples/scenarios/wefe_reverse_osmosis/README.md +++ /dev/null @@ -1,5 +0,0 @@ -# Dispatch example for oemof-tabular - -Run `scripts/infer.py` from the datapackage root directory to add the -meta data file `datapackage.json` after updating the resources of the -datapackage. 
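Note on the custom attributes introduced in wefe_custom_attributes/data/elements/pv_panel.csv above: the factor columns (renewable_factor, emission_factor, land_requirement_factor, water_footprint_factor) are exactly the raw inputs that the datapackage post-processing multiplies against optimized capacities and aggregated flows. A minimal sketch with hypothetical numbers (the factors mirror the CSV above, the flow and investment values are made up, not taken from a solver run):

# Hypothetical illustration of compute_additional_land_requirement / compute_water_footprint
land_requirement_factor = 10    # from pv_panel.csv above, per unit of installed capacity
water_footprint_factor = 5      # from pv_panel.csv above, per unit of flow

investments = 2.0               # hypothetical optimized capacity added by the solver
aggregated_flow = 100.0         # hypothetical sum of the dispatch time series

additional_land_requirement = investments * land_requirement_factor      # -> 20.0
water_footprint = aggregated_flow * water_footprint_factor               # -> 500.0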
diff --git a/examples/scenarios/wefe_reverse_osmosis/data/elements/bus.csv b/examples/scenarios/wefe_reverse_osmosis/data/elements/bus.csv index 62fcfb75..da86998c 100644 --- a/examples/scenarios/wefe_reverse_osmosis/data/elements/bus.csv +++ b/examples/scenarios/wefe_reverse_osmosis/data/elements/bus.csv @@ -1,7 +1,6 @@ name;type;balanced -solar-energy-bus;bus;true -dc-elec-bus;bus;true -ac-elec-bus;bus;true -water-bus;bus;true -permeate-bus;bus;true -brine-bus;bus;true +seawater-bus;bus;True +ac-elec-bus;bus;True +dc-elec-bus;bus;True +permeate-bus;bus;True +brine-bus;bus;True diff --git a/examples/scenarios/wefe_reverse_osmosis/data/elements/conversion.csv b/examples/scenarios/wefe_reverse_osmosis/data/elements/conversion.csv index bd9fda6e..c5dbf912 100644 --- a/examples/scenarios/wefe_reverse_osmosis/data/elements/conversion.csv +++ b/examples/scenarios/wefe_reverse_osmosis/data/elements/conversion.csv @@ -1,3 +1,2 @@ -name;type;carrier;tech;capacity;capacity_cost;marginal_cost;carrier_cost;efficiency;from_bus;to_bus;expandable -inverter;conversion;electricity;inverter;0;10;0;0;0.9;dc-elec-bus;ac-elec-bus;true -r-o-unit; +name;type;carrier;tech;capacity;capacity_potential;capacity_minimum;capacity_cost;marginal_cost;carrier_cost;efficiency;from_bus;to_bus;expandable;capex;opex_fix;lifetime +inverter;conversion;electricity;inverter;0;;;9.72;0;0;0.9;dc-elec-bus;ac-elec-bus;True;100;1;20 diff --git a/examples/scenarios/wefe_reverse_osmosis/data/elements/dispatchable.csv b/examples/scenarios/wefe_reverse_osmosis/data/elements/dispatchable.csv index b506d2d3..2eb036a3 100644 --- a/examples/scenarios/wefe_reverse_osmosis/data/elements/dispatchable.csv +++ b/examples/scenarios/wefe_reverse_osmosis/data/elements/dispatchable.csv @@ -1,3 +1,3 @@ -name;type;carrier;tech;capacity;capacity_cost;bus;marginal_cost;carrier_cost;profile;output_parameters;expandable -solar-radiation;dispatchable;solar-energy;source;0;0;solar-energy-bus;0;0;ghi-profile;{};true -water-source;dispatchable;water;source;0;0;water-bus;0.5;0;1;{};true +name;type;carrier;tech;capacity;capacity_cost;bus;marginal_cost;carrier_cost;profile;expandable +seawater-source;dispatchable;water;source;0;0;seawater-bus;0.0;0;1;True +backup-electricity-grid;dispatchable;electricity;source;0;0;ac-elec-bus;0.2;0;1;True diff --git a/examples/scenarios/wefe_reverse_osmosis/data/elements/excess.csv b/examples/scenarios/wefe_reverse_osmosis/data/elements/excess.csv index a406095a..a04e0450 100644 --- a/examples/scenarios/wefe_reverse_osmosis/data/elements/excess.csv +++ b/examples/scenarios/wefe_reverse_osmosis/data/elements/excess.csv @@ -1,3 +1,3 @@ name;type;bus;marginal_cost -electricity-excess;excess;elec-bus;0 -water-production;excess;water-bus;0 +electricity-excess;excess;ac-elec-bus;0 +brine-excess;excess;brine-bus;0 diff --git a/examples/scenarios/wefe_reverse_osmosis/data/elements/load.csv b/examples/scenarios/wefe_reverse_osmosis/data/elements/load.csv index f45090ab..ee0075ee 100644 --- a/examples/scenarios/wefe_reverse_osmosis/data/elements/load.csv +++ b/examples/scenarios/wefe_reverse_osmosis/data/elements/load.csv @@ -1,2 +1,3 @@ -name;amount;profile;type;bus -demand;1;electricity-load-profile;load;elec-bus +type;profile;name;region;year;bus;amount +load;permeate;permeate-demand;DE;2021;permeate-bus;1 +load;elec;elec-demand;DE;2021;ac-elec-bus;1 diff --git a/examples/scenarios/wefe_reverse_osmosis/data/elements/mimo.csv b/examples/scenarios/wefe_reverse_osmosis/data/elements/mimo.csv new file mode 100644 index 00000000..cec7653c 
--- /dev/null +++ b/examples/scenarios/wefe_reverse_osmosis/data/elements/mimo.csv @@ -0,0 +1,2 @@ +type;name;carrier;tech;expandable;primary;capacity;capacity_cost;from_bus_0;from_bus_1;to_bus_0;to_bus_1;conversion_factor_seawater-bus;conversion_factor_ac-elec-bus;conversion_factor_permeate-bus;conversion_factor_brine-bus;capex;opex_fix;lifetime +mimo;mimo;water;mimo;True;permeate-bus;0;287.4;seawater-bus;ac-elec-bus;permeate-bus;brine-bus;4.0;5.0;1.0;1.5;1000.0;50.0;5.0 diff --git a/examples/scenarios/wefe_reverse_osmosis/data/elements/pv_panel.csv b/examples/scenarios/wefe_reverse_osmosis/data/elements/pv_panel.csv deleted file mode 100644 index 813e2dbd..00000000 --- a/examples/scenarios/wefe_reverse_osmosis/data/elements/pv_panel.csv +++ /dev/null @@ -1,2 +0,0 @@ -name;type;carrier;tech;capacity;capacity_cost;marginal_cost;carrier_cost;from_bus;to_bus;t_air;ghi;p_rpv;r_ref;n_t;t_c_ref;noct;expandable -pv-panel;pv-panel;solar-energy;pv;0;100;0;0;solar-energy-bus;elec-bus;t-air;ghi;270;1000;-0.0037;25;48;true diff --git a/examples/scenarios/wefe_reverse_osmosis/data/elements/storage.csv b/examples/scenarios/wefe_reverse_osmosis/data/elements/storage.csv index e3ef16c0..90367f18 100644 --- a/examples/scenarios/wefe_reverse_osmosis/data/elements/storage.csv +++ b/examples/scenarios/wefe_reverse_osmosis/data/elements/storage.csv @@ -1,2 +1,3 @@ -name;carrier;tech;storage_capacity;storage_capacity_potential;capacity;efficiency;capacity_cost;storage_capacity_cost;marginal_cost;type;bus;invest_relation_output_capacity;invest_relation_input_output;expandable -battery-storage;lithium;battery;0;;0;0.92;0;100;0;storage;ac-elec-bus;0.1;1;True +name;carrier;tech;storage_capacity;storage_capacity_potential;capacity;efficiency;capacity_cost;storage_capacity_cost;marginal_cost;type;bus;invest_relation_output_capacity;expandable;capex;opex_fix;lifetime;land_requirement;output_parameters +battery-storage;electricity;battery;0;;0;0.92;0;44.04;0;storage;ac-elec-bus;1.0;True;350;8;15;2;"{""custom_attributes"": {""land_requirement"": 2}}" +water-storage;water;storage;0;;0;0.99;0;8.1;0;storage;permeate-bus;1.0;True;70;2;20;2;"{""custom_attributes"": {""land_requirement"": 2}}" diff --git a/examples/scenarios/wefe_reverse_osmosis/data/elements/volatile.csv b/examples/scenarios/wefe_reverse_osmosis/data/elements/volatile.csv new file mode 100644 index 00000000..843d7386 --- /dev/null +++ b/examples/scenarios/wefe_reverse_osmosis/data/elements/volatile.csv @@ -0,0 +1,2 @@ +name;type;carrier;tech;capacity;capacity_cost;bus;marginal_cost;profile;expandable;capex;opex_fix;lifetime +pv;volatile;electricity;pv;0.0;97.18;dc-elec-bus;0.0;pv-profile;True;1000.0;10.0;20.0 diff --git a/examples/scenarios/wefe_reverse_osmosis/data/sequences/dispatchable_profile.csv b/examples/scenarios/wefe_reverse_osmosis/data/sequences/dispatchable_profile.csv deleted file mode 100644 index e6ef6b71..00000000 --- a/examples/scenarios/wefe_reverse_osmosis/data/sequences/dispatchable_profile.csv +++ /dev/null @@ -1,25 +0,0 @@ -timeindex,ghi-profile,elec-profile,water-profile -2019-01-01T00:00:00Z,3.47E-05,1,1 -2019-01-01T01:00:00Z,3.47E-05,1,1 -2019-01-01T02:00:00Z,3.47E-05,1,1 -2019-01-01T03:00:00Z,3.47E-05,1,1 -2019-01-01T04:00:00Z,3.47E-05,1,1 -2019-01-01T05:00:00Z,3.47E-05,1,3 -2019-01-01T06:00:00Z,3.47E-05,1,5 -2019-01-01T07:00:00Z,3.47E-05,1,7 -2019-01-01T08:00:00Z,17.453646,1,12 -2019-01-01T09:00:00Z,105.80229,1,6 -2019-01-01T10:00:00Z,213.95938,1,4 -2019-01-01T11:00:00Z,279.92303,1,4 -2019-01-01T12:00:00Z,305.4247,1,9 
-2019-01-01T13:00:00Z,277.91446,1,14 -2019-01-01T14:00:00Z,208.47395,1,8 -2019-01-01T15:00:00Z,103.83528,1,3 -2019-01-01T16:00:00Z,13.256493,1,4 -2019-01-01T17:00:00Z,3.47E-05,1,4 -2019-01-01T18:00:00Z,3.47E-05,1,9 -2019-01-01T19:00:00Z,3.47E-05,1,10 -2019-01-01T20:00:00Z,3.47E-05,1,6 -2019-01-01T21:00:00Z,3.47E-05,1,5 -2019-01-01T22:00:00Z,3.47E-05,1,3 -2019-01-01T23:00:00Z,3.47E-05,1,2 diff --git a/examples/scenarios/wefe_reverse_osmosis/data/sequences/load_profile.csv b/examples/scenarios/wefe_reverse_osmosis/data/sequences/load_profile.csv index a46558d9..0d3de091 100644 --- a/examples/scenarios/wefe_reverse_osmosis/data/sequences/load_profile.csv +++ b/examples/scenarios/wefe_reverse_osmosis/data/sequences/load_profile.csv @@ -1,25 +1,4 @@ -timeindex,electricity-load-profile -2019-01-01T00:00:00Z,0.000101296 -2019-01-01T01:00:00Z,9.94E-05 -2019-01-01T02:00:00Z,9.94E-05 -2019-01-01T03:00:00Z,9.75E-05 -2019-01-01T04:00:00Z,0.000103208 -2019-01-01T05:00:00Z,0.000114675 -2019-01-01T06:00:00Z,0.000131876 -2019-01-01T07:00:00Z,0.000131876 -2019-01-01T08:00:00Z,0.00012232 -2019-01-01T09:00:00Z,0.000116586 -2019-01-01T10:00:00Z,0.000114675 -2019-01-01T11:00:00Z,0.000114675 -2019-01-01T12:00:00Z,0.000114675 -2019-01-01T13:00:00Z,0.000114675 -2019-01-01T14:00:00Z,0.000114675 -2019-01-01T15:00:00Z,0.000116586 -2019-01-01T16:00:00Z,0.000128054 -2019-01-01T17:00:00Z,0.000139521 -2019-01-01T18:00:00Z,0.000141433 -2019-01-01T19:00:00Z,0.000139521 -2019-01-01T20:00:00Z,0.000141433 -2019-01-01T21:00:00Z,0.000139521 -2019-01-01T22:00:00Z,0.000128054 -2019-01-01T23:00:00Z,0.000114675 +timeindex;permeate;elec +2021-01-01T00:00:00Z;10;20 +2021-01-01T01:00:00Z;20;40 +2021-01-01T02:00:00Z;30;60 diff --git a/examples/scenarios/wefe_reverse_osmosis/data/sequences/volatile_profile.csv b/examples/scenarios/wefe_reverse_osmosis/data/sequences/volatile_profile.csv new file mode 100644 index 00000000..7f346c9c --- /dev/null +++ b/examples/scenarios/wefe_reverse_osmosis/data/sequences/volatile_profile.csv @@ -0,0 +1,4 @@ +timeindex;pv-profile +2021-01-01T00:00:00Z;0.2 +2021-01-01T01:00:00Z;0.5 +2021-01-01T02:00:00Z;0.8 diff --git a/examples/scenarios/wefe_reverse_osmosis/datapackage.json b/examples/scenarios/wefe_reverse_osmosis/datapackage.json deleted file mode 100644 index 024ea3cc..00000000 --- a/examples/scenarios/wefe_reverse_osmosis/datapackage.json +++ /dev/null @@ -1,450 +0,0 @@ -{ - "profile": "tabular-data-package", - "name": "uzbek_scenario1_status_quo", - "oemof_tabular_version": "0.0.4dev", - "resources": [ - { - "path": "data/elements/bus.csv", - "profile": "tabular-data-resource", - "name": "bus", - "format": "csv", - "mediatype": "text/csv", - "encoding": "utf-8", - "schema": { - "fields": [ - { - "name": "name", - "type": "string", - "format": "default" - }, - { - "name": "type", - "type": "string", - "format": "default" - }, - { - "name": "balanced", - "type": "boolean", - "format": "default" - } - ], - "missingValues": [ - "" - ], - "primaryKey": "name", - "foreignKeys": [] - } - }, - { - "path": "data/elements/dispatchable.csv", - "profile": "tabular-data-resource", - "name": "dispatchable", - "format": "csv", - "mediatype": "text/csv", - "encoding": "utf-8", - "schema": { - "fields": [ - { - "name": "name", - "type": "string", - "format": "default" - }, - { - "name": "type", - "type": "string", - "format": "default" - }, - { - "name": "carrier", - "type": "string", - "format": "default" - }, - { - "name": "tech", - "type": "string", - "format": "default" - }, - { - "name": "capacity", - 
"type": "integer", - "format": "default" - }, - { - "name": "capacity_cost", - "type": "number", - "format": "default" - }, - { - "name": "bus", - "type": "string", - "format": "default" - }, - { - "name": "marginal_cost", - "type": "number", - "format": "default" - }, - { - "name": "carrier_cost", - "type": "number", - "format": "default" - }, - { - "name": "profile", - "type": "string", - "format": "default" - }, - { - "name": "output_parameters", - "type": "object", - "format": "default" - }, - { - "name": "expandable", - "type": "boolean", - "format": "default" - } - ], - "missingValues": [ - "" - ], - "primaryKey": "name", - "foreignKeys": [ - { - "fields": "bus", - "reference": { - "resource": "bus", - "fields": "name" - } - }, - { - "fields": "profile", - "reference": { - "resource": "dispatchable_profile" - } - } - ] - } - }, - { - "path": "data/elements/excess.csv", - "profile": "tabular-data-resource", - "name": "excess", - "format": "csv", - "mediatype": "text/csv", - "encoding": "utf-8", - "schema": { - "fields": [ - { - "name": "name", - "type": "string", - "format": "default" - }, - { - "name": "type", - "type": "string", - "format": "default" - }, - { - "name": "bus", - "type": "string", - "format": "default" - }, - { - "name": "marginal_cost", - "type": "integer", - "format": "default" - } - ], - "missingValues": [ - "" - ], - "primaryKey": "name", - "foreignKeys": [ - { - "fields": "bus", - "reference": { - "resource": "bus", - "fields": "name" - } - } - ] - } - }, - { - "path": "data/elements/load.csv", - "profile": "tabular-data-resource", - "name": "load", - "format": "csv", - "mediatype": "text/csv", - "encoding": "utf-8", - "schema": { - "fields": [ - { - "name": "name", - "type": "string", - "format": "default" - }, - { - "name": "amount", - "type": "integer", - "format": "default" - }, - { - "name": "profile", - "type": "string", - "format": "default" - }, - { - "name": "type", - "type": "string", - "format": "default" - }, - { - "name": "bus", - "type": "string", - "format": "default" - } - ], - "missingValues": [ - "" - ], - "primaryKey": "name", - "foreignKeys": [ - { - "fields": "bus", - "reference": { - "resource": "bus", - "fields": "name" - } - }, - { - "fields": "profile", - "reference": { - "resource": "load_profile" - } - } - ] - } - }, - { - "path": "data/elements/pv_panel.csv", - "profile": "tabular-data-resource", - "name": "pv_panel", - "format": "csv", - "mediatype": "text/csv", - "encoding": "utf-8", - "schema": { - "fields": [ - { - "name": "name", - "type": "string", - "format": "default" - }, - { - "name": "type", - "type": "string", - "format": "default" - }, - { - "name": "carrier", - "type": "string", - "format": "default" - }, - { - "name": "tech", - "type": "string", - "format": "default" - }, - { - "name": "capacity", - "type": "integer", - "format": "default" - }, - { - "name": "capacity_cost", - "type": "integer", - "format": "default" - }, - { - "name": "marginal_cost", - "type": "number", - "format": "default" - }, - { - "name": "carrier_cost", - "type": "number", - "format": "default" - }, - { - "name": "from_bus", - "type": "string", - "format": "default" - }, - { - "name": "to_bus", - "type": "string", - "format": "default" - }, - { - "name": "t_air", - "type": "string", - "format": "default" - }, - { - "name": "ghi", - "type": "string", - "format": "default" - }, - { - "name": "p_rpv", - "type": "number", - "format": "default" - }, - { - "name": "r_ref", - "type": "number", - "format": "default" - }, - { - "name": "n_t", - "type": 
"number", - "format": "default" - }, - { - "name": "t_c_ref", - "type": "number", - "format": "default" - }, - { - "name": "noct", - "type": "number", - "format": "default" - }, - { - "name": "expandable", - "type": "boolean", - "format": "default" - } - ], - "missingValues": [ - "" - ], - "primaryKey": "name", - "foreignKeys": [ - { - "fields": "from_bus", - "reference": { - "resource": "bus", - "fields": "name" - } - }, - { - "fields": "to_bus", - "reference": { - "resource": "bus", - "fields": "name" - } - }, - { - "fields": "t_air", - "reference": { - "resource": "pv_panel_profile" - } - }, - { - "fields": "ghi", - "reference": { - "resource": "pv_panel_profile" - } - } - ] - } - }, - { - "path": "data/sequences/dispatchable_profile.csv", - "profile": "tabular-data-resource", - "name": "dispatchable_profile", - "format": "csv", - "mediatype": "text/csv", - "encoding": "utf-8", - "schema": { - "fields": [ - { - "name": "timeindex", - "type": "datetime", - "format": "default" - }, - { - "name": "ghi-profile", - "type": "number", - "format": "default" - }, - { - "name": "elec-profile", - "type": "number", - "format": "default" - } - ], - "missingValues": [ - "" - ] - } - }, - { - "path": "data/sequences/load_profile.csv", - "profile": "tabular-data-resource", - "name": "load_profile", - "format": "csv", - "mediatype": "text/csv", - "encoding": "utf-8", - "schema": { - "fields": [ - { - "name": "timeindex", - "type": "datetime", - "format": "default" - }, - { - "name": "electricity-load-profile", - "type": "number", - "format": "default" - } - ], - "missingValues": [ - "" - ] - } - }, - { - "path": "data/sequences/pv_panel_profile.csv", - "profile": "tabular-data-resource", - "name": "pv_panel_profile", - "format": "csv", - "mediatype": "text/csv", - "encoding": "utf-8", - "schema": { - "fields": [ - { - "name": "timeindex", - "type": "datetime", - "format": "default" - }, - { - "name": "ghi", - "type": "number", - "format": "default" - }, - { - "name": "t-air", - "type": "number", - "format": "default" - } - ], - "missingValues": [ - "" - ] - } - } - ] -} From 2ba7d32e35932bf33458f1c165eb30c4fe8dd285 Mon Sep 17 00:00:00 2001 From: "ciara.dunks" Date: Thu, 13 Jun 2024 16:30:03 +0200 Subject: [PATCH 10/20] Modified gitignore to include all scenarios --- .gitignore | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/.gitignore b/.gitignore index ed2da80f..69764952 100644 --- a/.gitignore +++ b/.gitignore @@ -149,7 +149,4 @@ cython_debug/ # remove it from .gitignore and push this change # make changes to your scenario then push them # re-add the scenario to .gitignore and push -examples/scenarios/general_basic -examples/scenarios/general_add_cost_inputs -examples/scenarios/general_custom_attributes -examples/scenarios/general_constraints \ No newline at end of file +examples/scenarios \ No newline at end of file From eb8cafd4c4c7ee92a7835f28fbe38250504c18ff Mon Sep 17 00:00:00 2001 From: paulapreuss <96133467+paulapreuss@users.noreply.github.com> Date: Wed, 12 Jun 2024 16:34:16 +0200 Subject: [PATCH 11/20] add variable, opex and annuity costs to results dataframe --- .../datapackage/post_processing.py | 59 ++++++++++++++++--- 1 file changed, 52 insertions(+), 7 deletions(-) diff --git a/src/oemof_tabular_plugins/datapackage/post_processing.py b/src/oemof_tabular_plugins/datapackage/post_processing.py index 12343cb2..9feb5c42 100644 --- a/src/oemof_tabular_plugins/datapackage/post_processing.py +++ b/src/oemof_tabular_plugins/datapackage/post_processing.py @@ -5,12 +5,8 @@ 
import oemof.solph as solph import numpy as np - # ToDo: check to see if the storage optimized input/output (invest_out) and # optimized capacity (invest) are saved correctly -# ToDo: see if variable costs are provided as a raw output, and if not -# they should be calculated with: if a flow is into component, multiply flow by carrier cost -# and if the flow is out of component, multiply it by marginal cost # ToDo: is another raw output from the results is investment costs? or does this have to be calculated? RAW_OUTPUTS = ["investments"] PROCESSED_RAW_OUTPUTS = ["flow_min", "flow_max", "aggregated_flow"] @@ -41,6 +37,12 @@ def compute_total_capacity(results_df): return results_df.capacity + results_df.investments +def compute_total_annuity(results_df): + """Calculates total capacity by adding existing capacity (capacity) to optimized capacity (investments)""" + # TODO fix this to use storage_capacity_cost for the storage (or fix on the storage side) + return results_df.capacity_cost + results_df.investments + + def compute_upfront_investment_costs(results_df): # ToDo: check for storage if investments is based on correct parameter """Calculates investment costs by multiplying capex with optimized capacity (investments)""" @@ -50,6 +52,28 @@ def compute_upfront_investment_costs(results_df): return results_df.capex * results_df.investments +def compute_opex_fix_costs(results_df): + """Calculates yearly opex costs by multiplying opex with optimized capacity (investments)""" + if "opex_fix" not in results_df.index: + return None + else: + return results_df.opex_fix * results_df.investments + + +def compute_variable_costs(results_df): + """Calculates variable costs by multiplying the marginal cost by the aggregated flow if the direction is out, + and by the carrier cost if the direction is in. 
The total marginal costs for each asset correspond to the sum + of the marginal costs for the in- and output flows""" + if results_df.name[1] == "out": + if "marginal_cost" not in results_df.index: + return None + return results_df.marginal_cost * results_df.aggregated_flow + elif results_df.name[1] == "in": + if "carrier_cost" not in results_df.index: + return None + return results_df.carrier_cost * results_df.aggregated_flow + + def compute_renewable_generation(results_df): """Calculates renewable generation by multiplying aggregated flow by renewable factor""" if "renewable_factor" not in results_df.index: @@ -105,21 +129,42 @@ def _check_arguments(df, column_names, col_name): "column_name": "total_capacity", "operation": compute_total_capacity, "description": "The total capacity is calculated by adding the optimized capacity (investments) " - "to the existing capacity (capacity)", + "to the existing capacity (capacity)", "argument_names": ["investments", "capacity"], }, + { + "column_name": "total_annuity", + "operation": compute_total_annuity, + "description": "Total annuity is calculated by multiplying the optimized capacity " + "by the capacity cost (annuity considering CAPEX, OPEX and WACC)", + "argument_names": ["investments", "capacity_cost"], + }, { "column_name": "upfront_investment_costs", "operation": compute_upfront_investment_costs, "description": "Upfront investment costs are calculated by multiplying the optimized capacity " - "by the CAPEX", + "by the CAPEX", "argument_names": ["investments", "capex"], }, + { + "column_name": "total_opex_fix_costs", + "operation": compute_opex_fix_costs, + "description": "Operation and maintenance costs are calculated by multiplying the optimized capacity " + "by the OPEX", + "argument_names": ["aggregated_flow", "marginal_cost", "carrier_cost"], + }, + { + "column_name": "total_variable_costs", + "operation": compute_variable_costs, + "description": "Variable costs are calculated by multiplying the total flow " + "by the marginal/carrier costs", + "argument_names": ["aggregated_flow", "marginal_cost", "carrier_cost"], + }, { "column_name": "renewable_generation", "operation": compute_renewable_generation, "description": "The renewable generation for each component is computed from the flow and the " - "renewable factor.", + "renewable factor.", "argument_names": [ "aggregated_flow", "renewable_factor", From 67bd3c09e6f3f1f5c185291036e3ba759d53ffce Mon Sep 17 00:00:00 2001 From: paulapreuss <96133467+paulapreuss@users.noreply.github.com> Date: Thu, 13 Jun 2024 16:25:21 +0200 Subject: [PATCH 12/20] add new functions to get sub-tables from results dataframe (WIP) --- .../post_processing/post_processing.py | 64 +++++++++++++++++++ 1 file changed, 64 insertions(+) diff --git a/src/oemof_tabular_plugins/general/post_processing/post_processing.py b/src/oemof_tabular_plugins/general/post_processing/post_processing.py index e3cd199d..1d5be0e5 100644 --- a/src/oemof_tabular_plugins/general/post_processing/post_processing.py +++ b/src/oemof_tabular_plugins/general/post_processing/post_processing.py @@ -656,6 +656,58 @@ def create_costs_table(all_scalars, results, capacities_df, storage_capacities_d return costs_df +# ------ New post-processing to create tables ------ +# This dictionary contains groups of columns that should be extracted from the df_results to generate a clearer overview + +# TODO add a column for planned capacity (not optimizable but including costs) in capacities if it gets properly implemented +# (planned capacity can be set by 
setting capacity_minimum == capacity_potential and dispatchable = True +RESULT_TABLE_COLUMNS = {"costs": ["upfront_investment_costs", "total_annuity", "total_variable_costs"], + "capacities": ["capacity", "storage_capacity", "capacity_potential", + "storage_capacity_potential", "total_capacity"] + } + + +def extract_table_from_results(df_results, columns): + """ Extracts a set of columns from the df_results DataFrame. The lists of columns to generate these tables can be + defined in RESULT_TABLE_COLUMNS + :param df_results: multiindex results dataframe with additional columns (OTPCalculator.df_results) + :param columns: list of columns to generate the sub-table (should be defined in RESULT_TABLE_COLUMNS) + :return: dataframe containing the columns specified in columns, if present in df_results + """ + missing_columns = [] + for col in columns: + try: + df_results[col] + except KeyError: + # If the key is not in the df_results dataframe, log it as a warning and pop the column from the list + logging.warning(f"The column {col} was not found in the results DataFrame, will be skipped in the subtable") + missing_columns.append(col) + + columns = [col for col in columns if col not in missing_columns] + results_table = df_results[columns].copy() + # TODO some of these names may be confusing because they are just the columns, maybe there should be a + # verbose parameter in CALCULATED OUTPUTS that we can then also use here + results_table.columns = [col.title().replace("_", " ") for col in results_table.columns] + return results_table + + +def create_kpi_df(df_results): + # TODO these should also be calculated in a similar way to CALCULATED_OUTPUTS, where the necessary columns are given + # as input and the code checks that the necessary columns exist before performing the calculations. 
But instead of + # adding the column to the dataframe, the result is just used here to construct the system-wide kpi table + pass + + +def save_table_to_csv(table, results_path, filename): + """ Saves a DataFrame to a .csv file """ + filepath = os.path.join(results_path, filename) + table.to_csv(filepath) + + +# TODO figure out the best table/display for storage results + + +# -------------------------------------------------- class OTPCalculator(Calculator): def __init__(self, input_parameters, energy_system, dp_path): @@ -684,6 +736,18 @@ def post_processing(params, es, results_path, dp_path): results_by_flow = calculator.df_results results_by_flow.to_csv(results_path + "/results_by_flow.csv", index=True) + # get sub-tables from results dataframe + cost_table = extract_table_from_results(calculator.df_results, RESULT_TABLE_COLUMNS["costs"]) + capacities_table = extract_table_from_results(calculator.df_results, RESULT_TABLE_COLUMNS["capacities"]) + + # save tables to csv files + tables_to_save = { + "costs.csv": cost_table, + "capacities.csv": capacities_table + } + for filename, table in tables_to_save.items(): + save_table_to_csv(table, results_path, filename) + # ----- OLD POST-PROCESSING - TO BE DELETED ONCE CERTAIN ----- # calculate scalars using functions from clc module From 1af2e572a7c43341018957216f92f6510c60421b Mon Sep 17 00:00:00 2001 From: paulapreuss <96133467+paulapreuss@users.noreply.github.com> Date: Thu, 13 Jun 2024 16:27:01 +0200 Subject: [PATCH 13/20] expand raw inputs list capacity potential should be included in the capacities sub-table, so we already add it to the "mother" dataframe to be extracted later --- src/oemof_tabular_plugins/datapackage/post_processing.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/oemof_tabular_plugins/datapackage/post_processing.py b/src/oemof_tabular_plugins/datapackage/post_processing.py index 9feb5c42..0a8b2b2c 100644 --- a/src/oemof_tabular_plugins/datapackage/post_processing.py +++ b/src/oemof_tabular_plugins/datapackage/post_processing.py @@ -16,8 +16,10 @@ "capacity_cost", "storage_capacity_cost", "capacity", + "capacity_potential", "expandable", "storage_capacity", + "storage_capacity_potential", "min_capacity", "max_capacity", "efficiency", From 8be775a2c05391c46a5268ac37a215a385284977 Mon Sep 17 00:00:00 2001 From: "ciara.dunks" Date: Mon, 17 Jun 2024 15:17:39 +0200 Subject: [PATCH 14/20] set numpy version==1.26.0 in pyproject.toml --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 2f2e8dc6..f8f06182 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,5 +1,5 @@ [build-system] -requires = ["setuptools", "oemof.tabular"] +requires = ["setuptools", "oemof.tabular", "numpy==1.26.0"] build-backend = "setuptools.build_meta" [project] From c8446155f9bebc9539ec73a53d4ca4dbf77d9f8d Mon Sep 17 00:00:00 2001 From: "ciara.dunks" Date: Tue, 18 Jun 2024 10:46:10 +0200 Subject: [PATCH 15/20] Added KPI results table and saved as CSV --- .../datapackage/post_processing.py | 198 ++++++++++++++++-- .../post_processing/post_processing.py | 64 +++--- 2 files changed, 221 insertions(+), 41 deletions(-) diff --git a/src/oemof_tabular_plugins/datapackage/post_processing.py b/src/oemof_tabular_plugins/datapackage/post_processing.py index 0a8b2b2c..ff9b82d5 100644 --- a/src/oemof_tabular_plugins/datapackage/post_processing.py +++ b/src/oemof_tabular_plugins/datapackage/post_processing.py @@ -17,11 +17,10 @@ "storage_capacity_cost", "capacity", "capacity_potential", + 
"capacity_minimum", "expandable", "storage_capacity", "storage_capacity_potential", - "min_capacity", - "max_capacity", "efficiency", "capex", "opex_fix", @@ -64,8 +63,8 @@ def compute_opex_fix_costs(results_df): def compute_variable_costs(results_df): """Calculates variable costs by multiplying the marginal cost by the aggregated flow if the direction is out, - and by the carrier cost if the direction is in. The total marginal costs for each asset correspond to the sum - of the marginal costs for the in- and output flows""" + and by the carrier cost if the direction is in. The total marginal costs for each asset correspond to the sum + of the marginal costs for the in- and output flows""" if results_df.name[1] == "out": if "marginal_cost" not in results_df.index: return None @@ -116,6 +115,65 @@ def compute_water_footprint(results_df): return results_df.aggregated_flow * results_df.water_footprint_factor +def compute_total_system_cost(results_df): + """Calculates the total system cost by summing the total annuity and total variable costs + for each component""" + total_system_cost = ( + results_df["total_annuity"].sum() + results_df["total_variable_costs"].sum() + ) + return total_system_cost + + +def compute_total_upfront_investments(results_df): + """Calculates the total upfront investments by summing the upfront investments for each component""" + total_upfront_investments = results_df["upfront_investment_costs"].sum() + return total_upfront_investments + + +def compute_total_emissions(results_df): + """Calculates the total upfront investments by summing the upfront investments for each component""" + total_emissions = results_df["co2_emissions"].sum() + return total_emissions + + +def compute_system_additional_land_requirement(results_df): + """Calculates the additional land requirement from optimized capacities by summing the additional + land requirement for each component""" + additional_land_requirement = results_df["additional_land_requirement"].sum() + return additional_land_requirement + + +def compute_system_total_land_requirement(results_df): + """Calculates the total land requirement by summing the total land requirement for each component""" + total_land_requirement = results_df["total_land_requirement"].sum() + return total_land_requirement + + +def compute_total_water_footprint(results_df): + """Calculates the total water footprint by summing the total water footprint for each component""" + total_water_footprint = results_df["water_footprint"].sum() + return total_water_footprint + + +def compute_specific_system_cost(results_df): + """Calculates the total upfront investments by summing the upfront investments for each component""" + # ToDo: will need to be adapted when non-energetic loads are included - for now only electricity is + # considered but this is not correct + # ToDo: NEED TO CHANGE: somehow select only electricity components to calculate LCOE - discuss with Paula + total_load = 0 + total_system_cost = ( + results_df["total_annuity"].sum() + results_df["total_variable_costs"].sum() + ) + for index, row in results_df.iterrows(): + # This is a quick fix to not include water - need to talk to Julian about how other demands should + # be considered + if index[4] == "load" and index[3] == "electricity": + print("asset type in index") + total_load += row.get("aggregated_flow", 0) + specific_system_cost = total_system_cost / total_load + return specific_system_cost + + def _check_arguments(df, column_names, col_name): """Check that all required argument are present 
in the DataFrame columns""" for arg in column_names: @@ -131,64 +189,127 @@ def _check_arguments(df, column_names, col_name): "column_name": "total_capacity", "operation": compute_total_capacity, "description": "The total capacity is calculated by adding the optimized capacity (investments) " - "to the existing capacity (capacity)", + "to the existing capacity (capacity)", "argument_names": ["investments", "capacity"], }, { "column_name": "total_annuity", "operation": compute_total_annuity, "description": "Total annuity is calculated by multiplying the optimized capacity " - "by the capacity cost (annuity considering CAPEX, OPEX and WACC)", + "by the capacity cost (annuity considering CAPEX, OPEX and WACC)", "argument_names": ["investments", "capacity_cost"], }, { "column_name": "upfront_investment_costs", "operation": compute_upfront_investment_costs, "description": "Upfront investment costs are calculated by multiplying the optimized capacity " - "by the CAPEX", + "by the CAPEX", "argument_names": ["investments", "capex"], }, { "column_name": "total_opex_fix_costs", "operation": compute_opex_fix_costs, "description": "Operation and maintenance costs are calculated by multiplying the optimized capacity " - "by the OPEX", + "by the OPEX", "argument_names": ["aggregated_flow", "marginal_cost", "carrier_cost"], }, { "column_name": "total_variable_costs", "operation": compute_variable_costs, "description": "Variable costs are calculated by multiplying the total flow " - "by the marginal/carrier costs", + "by the marginal/carrier costs", "argument_names": ["aggregated_flow", "marginal_cost", "carrier_cost"], }, { "column_name": "renewable_generation", "operation": compute_renewable_generation, "description": "The renewable generation for each component is computed from the flow and the " - "renewable factor.", + "renewable factor.", "argument_names": [ "aggregated_flow", "renewable_factor", ], }, { - "column_name": "cO2_emmissions", + "column_name": "co2_emissions", "operation": compute_co2_emissions, - "description": "CO2 emissions are calculated from the flow and the emission factor.", + "description": "CO2 emissions are calculated from the flow and the emission factor", "argument_names": ["aggregated_flow", "emission_factor"], }, { "column_name": "additional_land_requirement", "operation": compute_additional_land_requirement, - "description": "The additional land requirement calculates the land required for the optimized capacities.", - "argument_names": ["investments", "emission_factor"], + "description": "The additional land requirement calculates the land required for the optimized capacities", + "argument_names": ["investments", "land_requirement_factor"], }, { "column_name": "total_land_requirement", "operation": compute_total_land_requirement, - "description": "The total land requirement calculates the land required for the total capacities.", - "argument_names": ["total_capacity", "emission_factor"], + "description": "The total land requirement calculates the land required for the total capacities", + "argument_names": ["total_capacity", "land_requirement_factor"], + }, + { + "column_name": "water_footprint", + "operation": compute_water_footprint, + "description": "The water footprint calculates the water footprint for the aggregated flows of each component", + "argument_names": ["aggregated_flow", "water_footprint_factor"], + }, +] + +# ToDo: turn dict into a class (see CALCULATED_OUTPUTS) and decide where this belongs - either here or in processing +# or maybe these can be joined 
with CALCULATED_OUTPUTS and there is another parameter that defines if it is a calculation +# per component (to be added to df_results) or a calculation for the whole system (e.g. LCOE, total emissions etc). +# Probably this should be included with the other CALCULATED_OUTPUTS eventually, but should ask PF +CALCULATED_KPIS = [ + { + "column_name": "total_system_cost", + "operation": compute_total_system_cost, + "description": "The total system cost is calculated by summing up the total annuity (CAPEX and OPEX fix " + "multiplied by the optimized capacity) and the total variable costs (including carrier and " + "marginal costs) for each component, then summing these values to get the total value " + "for the system", + "argument_names": ["total_annuity", "total_variable_costs"], + }, + { + "column_name": "total_upfront_investments", + "operation": compute_total_upfront_investments, + "description": "The total upfront investments value is calculated by summing the upfront investment " + "costs for each component", + "argument_names": ["upfront_investment_costs"], + }, + { + "column_name": "specific_system_cost", + "operation": compute_specific_system_cost, + "description": "The specific system cost is calculated by dividing the total system cost by the total (electricity) demand", + "argument_names": ["aggregated_flow", "total_annuity", "total_variable_costs"], + }, + { + "column_name": "total_emissions", + "operation": compute_total_emissions, + "description": "The total emissions are calculated by summing the CO2 emissions " + "for each component", + "argument_names": ["co2_emissions"], + }, + { + "column_name": "additional_land_requirement", + "operation": compute_system_additional_land_requirement, + "description": "The total additional land requirement is calculated by summing the additional land requirement " + "for each component", + "argument_names": ["additional_land_requirement"], + }, + { + "column_name": "total_land_requirement", + "operation": compute_system_total_land_requirement, + "description": "The total land requirement is calculated by summing the total land requirement " + "for each component", + "argument_names": ["total_land_requirement"], + }, + { + "column_name": "total_water_footprint", + "operation": compute_total_water_footprint, + "description": "The total water footprint is calculated by summing the water footprint required " + "for each component", + "argument_names": ["water_footprint"], + }, ] @@ -200,6 +321,14 @@ def _check_arguments(df, column_names, col_name): else: calc["docstring"] = "" +# Add docstrings from function handles for documentation purposes +for calc in CALCULATED_KPIS: + func_handle = calc.get("operation", None) + if callable(func_handle): + calc["docstring"] = func_handle.__doc__ + else: + calc["docstring"] = "" + def _validate_calculation(calculation): """Check if the parameters of a calculation are there and of the right format""" @@ -511,3 +640,40 @@ def apply_calculations(results_df, calculations=CALCULATED_OUTPUTS): logging.info( f"Removed column '{var_name}' because it contains all None values." ) + + +def apply_kpi_calculations(results_df, calculations=CALCULATED_KPIS): + """Apply calculations and return a new DataFrame with the KPIs. + + Parameters + ---------- + results_df : pd.DataFrame + The input DataFrame with raw data. + calculations : list of dict + List of calculations to be applied. Each calculation is a dictionary + with keys: "column_name", "argument_names", and "operation". + + Returns + ------- + pd.DataFrame + A new DataFrame containing the calculated KPI values with var_name as the index.
+ """ + kpis = [] + + for calc in calculations: + _validate_calculation(calc) + var_name = calc.get("column_name") + argument_names = calc.get("argument_names", []) + func_handle = calc.get("operation") + + try: + _check_arguments(results_df, column_names=argument_names, col_name=var_name) + except AttributeError as e: + logging.warning(e) + continue + + kpi_value = func_handle(results_df) + kpis.append({"kpi": var_name, "value": kpi_value}) + + kpi_df = pd.DataFrame(kpis).set_index("kpi") + return kpi_df diff --git a/src/oemof_tabular_plugins/general/post_processing/post_processing.py b/src/oemof_tabular_plugins/general/post_processing/post_processing.py index 1d5be0e5..3bbf9b5d 100644 --- a/src/oemof_tabular_plugins/general/post_processing/post_processing.py +++ b/src/oemof_tabular_plugins/general/post_processing/post_processing.py @@ -1,17 +1,20 @@ import logging import os -import pandas as pd -import numpy as np import warnings -from oemof.tabular.postprocessing.core import Calculator + +import pandas as pd from oemof.tabular.postprocessing import calculations as clc, naming +from oemof.tabular.postprocessing.core import Calculator + from oemof_tabular_plugins.datapackage.post_processing import ( construct_dataframe_from_results, process_raw_results, process_raw_inputs, apply_calculations, + apply_kpi_calculations, ) + # ToDo: the functions below need proper testing and appropriate logging info for the user's understanding # NOTE: the post-processing module is expected to change once the main multi-index dataframe is created, so # expect a change in structure, but the calculations should not need to be changed @@ -659,16 +662,26 @@ def create_costs_table(all_scalars, results, capacities_df, storage_capacities_d # ------ New post-processing to create tables ------ # This dictionary contains groups of columns that should be extracted from the df_results to generate a clearer overview + # TODO add a column for planned capacity (not optimizable but including costs) in capacities if it gets properly implemented # (planned capacity can be set by setting capacity_minimum == capacity_potential and dispatchable = True -RESULT_TABLE_COLUMNS = {"costs": ["upfront_investment_costs", "total_annuity", "total_variable_costs"], - "capacities": ["capacity", "storage_capacity", "capacity_potential", - "storage_capacity_potential", "total_capacity"] - } +RESULT_TABLE_COLUMNS = { + "costs": ["upfront_investment_costs", "total_annuity", "total_variable_costs"], + "capacities": [ + "capacity", + "storage_capacity", + "capacity_minimum", + "capacity_potential", + "storage_capacity_potential", + "expandable", + "investments", + "total_capacity", + ], +} def extract_table_from_results(df_results, columns): - """ Extracts a set of columns from the df_results DataFrame. The lists of columns to generate these tables can be + """Extracts a set of columns from the df_results DataFrame. 
The lists of columns to generate these tables can be defined in RESULT_TABLE_COLUMNS :param df_results: multiindex results dataframe with additional columns (OTPCalculator.df_results) :param columns: list of columns to generate the sub-table (should be defined in RESULT_TABLE_COLUMNS) @@ -680,26 +693,23 @@ def extract_table_from_results(df_results, columns): df_results[col] except KeyError: # If the key is not in the df_results dataframe, log it as a warning and pop the column from the list - logging.warning(f"The column {col} was not found in the results DataFrame, will be skipped in the subtable") + logging.warning( + f"The column {col} was not found in the results DataFrame, will be skipped in the subtable" + ) missing_columns.append(col) columns = [col for col in columns if col not in missing_columns] results_table = df_results[columns].copy() # TODO some of these names may be confusing because they are just the columns, maybe there should be a # verbose parameter in CALCULATED OUTPUTS that we can then also use here - results_table.columns = [col.title().replace("_", " ") for col in results_table.columns] + results_table.columns = [ + col.title().replace("_", " ") for col in results_table.columns + ] return results_table -def create_kpi_df(df_results): - # TODO these should also be calculated in a similar way to CALCULATED_OUTPUTS, where the necessary columns are given - # as input and the code checks that the necessary columns exist before performing the calculations. But instead of - # adding the column to the dataframe, the result is just used here to construct the system-wide kpi table - pass - - def save_table_to_csv(table, results_path, filename): - """ Saves a DataFrame to a .csv file """ + """Saves a DataFrame to a .csv file""" filepath = os.path.join(results_path, filename) table.to_csv(filepath) @@ -715,6 +725,7 @@ def __init__(self, input_parameters, energy_system, dp_path): self.df_results = process_raw_results(self.df_results) self.df_results = process_raw_inputs(self.df_results, dp_path) apply_calculations(self.df_results) + self.kpis = apply_kpi_calculations(self.df_results) super().__init__(input_parameters, energy_system.results) @@ -734,17 +745,20 @@ def post_processing(params, es, results_path, dp_path): # print(calculator.df_results) results = es.results results_by_flow = calculator.df_results - results_by_flow.to_csv(results_path + "/results_by_flow.csv", index=True) + results_by_flow.to_csv(results_path + "/all_results_by_flow.csv", index=True) + kpis = calculator.kpis + kpis.to_csv(results_path + "/kpis.csv", index=True) # get sub-tables from results dataframe - cost_table = extract_table_from_results(calculator.df_results, RESULT_TABLE_COLUMNS["costs"]) - capacities_table = extract_table_from_results(calculator.df_results, RESULT_TABLE_COLUMNS["capacities"]) + cost_table = extract_table_from_results( + calculator.df_results, RESULT_TABLE_COLUMNS["costs"] + ) + capacities_table = extract_table_from_results( + calculator.df_results, RESULT_TABLE_COLUMNS["capacities"] + ) # save tables to csv files - tables_to_save = { - "costs.csv": cost_table, - "capacities.csv": capacities_table - } + tables_to_save = {"costs.csv": cost_table, "capacities.csv": capacities_table} for filename, table in tables_to_save.items(): save_table_to_csv(table, results_path, filename) From ab2f545099c4c127f50b3d38329133e5aa58619b Mon Sep 17 00:00:00 2001 From: "ciara.dunks" Date: Tue, 18 Jun 2024 10:55:17 +0200 Subject: [PATCH 16/20] Deleted old post-processing functions --- 
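Note: the helpers deleted below produced the capacities, costs, flows and KPI tables directly from the oemof results; those outputs are now generated by the registry-driven pipeline added in the previous patch (CALCULATED_KPIS together with apply_kpi_calculations and RESULT_TABLE_COLUMNS). A minimal sketch of how a further system-wide indicator could be registered under the new scheme follows; the indicator name, the compute_total_excess_generation helper, the 'excess' label convention and the calculator/results_path variables are illustrative assumptions, not part of this patch.

    import os

    from oemof_tabular_plugins.datapackage.post_processing import (
        CALCULATED_KPIS,
        apply_kpi_calculations,
    )


    def compute_total_excess_generation(results_df):
        """Sum the aggregated flows feeding sinks whose label contains 'excess' (illustrative only)."""
        # 'asset' is one of the multi-index level names built by construct_multi_index_levels
        mask = results_df.index.get_level_values("asset").str.contains("excess")
        return results_df.loc[mask, "aggregated_flow"].sum()


    # register the indicator so that apply_kpi_calculations picks it up by default
    CALCULATED_KPIS.append(
        {
            "column_name": "total_excess_generation",
            "operation": compute_total_excess_generation,
            "description": "Sum of the aggregated flows into excess sinks",
            "argument_names": ["aggregated_flow"],
        }
    )

    # calculator is assumed to be an OTPCalculator instance and results_path an output folder,
    # as used inside post_processing()
    kpi_df = apply_kpi_calculations(calculator.df_results)
    kpi_df.to_csv(os.path.join(results_path, "kpis.csv"))

Because apply_kpi_calculations defaults to the shared CALCULATED_KPIS list, the kpis.csv written by post_processing() would then contain the extra row without further changes.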
.../post_processing/post_processing.py | 707 +----------------- 1 file changed, 3 insertions(+), 704 deletions(-) diff --git a/src/oemof_tabular_plugins/general/post_processing/post_processing.py b/src/oemof_tabular_plugins/general/post_processing/post_processing.py index 3bbf9b5d..74fdfdca 100644 --- a/src/oemof_tabular_plugins/general/post_processing/post_processing.py +++ b/src/oemof_tabular_plugins/general/post_processing/post_processing.py @@ -14,657 +14,12 @@ apply_kpi_calculations, ) - -# ToDo: the functions below need proper testing and appropriate logging info for the user's understanding -# NOTE: the post-processing module is expected to change once the main multi-index dataframe is created, so -# expect a change in structure, but the calculations should not need to be changed - - -def excess_generation(all_scalars): - """ - Calculates the excess generation for each energy vector - :param all_scalars: all scalars multiindex dataframe (from oemof tabular) - :return: dictionary containing all excess generation values - """ - # assuming your DataFrame has a MultiIndex with levels ("name", "var_name") - excess_rows = all_scalars[ - all_scalars.index.get_level_values("name").str.contains("excess") - ] - # convert the excess_rows DataFrame to a dictionary - excess_dict = excess_rows["var_value"].to_dict() - # extract only the first part of the MultiIndex ('name') and use it as the key - excess_dict = {(key[0]): value for key, value in excess_dict.items()} - - return excess_dict - - -def calculate_specific_system_cost(all_scalars, total_system_costs): - # if the units are in MWh, the specific cost will be in currency/MWh -> user needs to divide by 1000 to - # get to currency/kWh (usual standard for LCOE). I have left it general for now so systems can be set up - # in different scales e.g. kWh, MWh, GWh... but this could be adapted to always return a value in - # currency/kWh, requiring an input of the energy system scale (kWh, MWh, GWh etc) - """ - Calculates the specific system costs based on total system costs from optimization (this might change) and total - demand (including demands from all sectors) - :return: specific system cost - """ - # conditionally extract values based on the 'type' column - demand_values = all_scalars.loc[all_scalars["type"] == "load", "var_value"].tolist() - demand_values_sum = sum(demand_values) - # extract total_system_cost value from dataframe - total_system_cost = total_system_costs["var_value"].iloc[0] - # calculate specific system costs (currency/total demand) rounded to 2dp - specific_system_cost = round(total_system_cost / demand_values_sum, 2) - - return specific_system_cost - - -def calculate_renewable_share(results): - """ - Calculates the renewable share of generation based on the renewable factor set in the inputs. 
- :param results: oemof model results - :return: renewable share value - """ - # initiate renewable_generation and nonrenewable_generation values - total_renewable_generation = 0 - total_nonrenewable_generation = 0 - # set boolean for finding renewable factor parameter in any of the csv inputs - renewable_factor_found = False - - # loop through the results dict - for entry_key, entry_value in results.items(): - # store the 'sequences' value for each oemof object tuple in results dict - sequences = entry_value.get("sequences", None) - if sequences is None: - continue - # check if the oemof object tuple has the 'output_parameters' attribute - if hasattr(entry_key[0], "output_parameters"): - # store the 'output_parameters' dict as output_param_dict - output_param_dict = entry_key[0].output_parameters - # retrieve the 'renewable_factor' value - renewable_factor = output_param_dict.get("custom_attributes", {}).get( - "renewable_factor" - ) - if renewable_factor is not None: - # set to True because a renewable factor parameter has been found - renewable_factor_found = True - # store the total generation for the component - generation = sequences.sum().sum() - # multiply the total generation by the renewable factor to get the renewable generation - renewable_generation = generation * renewable_factor - # add this to the total amount for the whole system - total_renewable_generation += renewable_generation - # the nonrenewable generation is the total generation - renewable generation - nonrenewable_generation = generation - renewable_generation - # add this to the total amount for the whole system - total_nonrenewable_generation += nonrenewable_generation - - if renewable_factor_found is True: - total_generation = total_renewable_generation + total_nonrenewable_generation - # if total generation is 0, return 0 to avoid division by 0 - # ToDo: test this to see if still necessary or maybe adapt - if total_generation == 0: - warnings.warn( - "Total generation is 0. This may be because there is no generation.", - UserWarning, - ) - return 0 - # calculate the renewable share (rounded to 2dp) - renewable_share = round(total_renewable_generation / total_generation, 2) - else: - renewable_share = None - - return renewable_share - - -def calculate_total_emissions(results): - # At present, the total annual emissions is rounded to 2dp but maybe this value should - # be rounded to the nearest int - """Calculates the total annual emissions by applying the emission factor to the - aggregated flow of each component if the emission factor is defined in the csv inputs. 
- :param results: oemof model results - :return: total annual emissions value (2dp) - """ - # initiate total emissions value - total_emissions = 0 - emission_factor_found = False - # loop through the results dict - for entry_key, entry_value in results.items(): - # store the 'sequences' value for each oemof object tuple in results dict - sequences = entry_value.get("sequences", None) - # check if sequences exist and if they are relevant to flows (necessary for storage component - # where two items exist in results: one with sequences for storage content and one for flows) - if sequences is not None and "flow" in sequences.columns.get_level_values( - "var_name" - ): - # check if the oemof object tuple has the 'output_parameters' attribute - if hasattr(entry_key[0], "output_parameters"): - # store the 'output_parameters' dict as output_param_dict - output_param_dict = entry_key[0].output_parameters - # retrieve the 'emission_factor' value if it exists - # NOTE: this means that the user must define the emission factor as 'emission_factor' otherwise - # the total emissions won't be calculated - emission_factor = output_param_dict.get("custom_attributes", {}).get( - "emission_factor" - ) - if emission_factor is not None: - emission_factor_found = True - total_emissions += emission_factor * sequences.sum().sum() - # if the emission factor parameter is found in any input csv files, the value is stored and rounded to 2dp - if emission_factor_found is True: - total_emissions = round(total_emissions, 2) - # if the land requirement parameter is not found in any input csv files, the value is stored as None - else: - total_emissions = None - - return total_emissions - - -def create_capacities_table(all_scalars, results): - # ToDo: this function has a lot of repetition so can be made cleaner/shorter - the aim is that this - # function will be adapted and improved by getting the information from filtering the 'mother' - # multiindex dataframe once this has been created - """ - Creates a DataFrame containing information regarding the component capacities from the oemof - model results. 
- :param all_scalars: all scalars multiindex dataframe (from oemof tabular) - :param results: oemof model results - :return: capacities dataframe - """ - # set columns of the capacities dataframe - # NOTE: when this function is modified, the aim is to remove the initial setting of columns of the df - capacities_df = pd.DataFrame( - columns=[ - "Component", - "Type", - "Carrier", - "Existing Capacity", - "Capacity Potential", - "Optimizable", - "Optimized Capacity", - "Total Capacity", - ] - ) - # create an empty set for the component names - component_names = set() - # iterate over the index and row of the dataframe - for idx, row in all_scalars.iterrows(): - # store variables to be included in the dataframe - component_name = idx[0] - component_variable = idx[1] - component_type = row["type"] - component_carrier = row["carrier"] - # only include component names that haven't already been included to avoid repetition, - # and don't include system in the dataframe because this refers to the system costs (nothing with capacities) - # and don't include the storage components because these are stored in a different table - if ( - component_name not in component_names - and component_name != "system" - and "storage" not in component_name - ): - component_names.add(component_name) - # add component name and corresponding type and carrier to the dataframe - capacities_df = capacities_df._append( - { - "Component": component_name, - "Type": component_type, - "Carrier": component_carrier, - }, - ignore_index=True, - ) - - # check if 'invest_out' is in the component_variable - if "invest_out" in component_variable: - # if it is, get the corresponding 'var_value' for the optimized capacity value - component_opt_capacity = row["var_value"] - # if the value is -0.0, adapt this to 0.0 - if component_opt_capacity == -0.0: - component_opt_capacity = 0.0 - # add or update 'Optimized Capacity' for the component_name with the optimized capacity value - capacities_df.loc[ - capacities_df["Component"] == component_name, "Optimized Capacity" - ] = component_opt_capacity - - # loop through the results dict - for entry_key, entry_value in results.items(): - # check if the oemof object tuple has the 'capacity' attribute - if hasattr(entry_key[0], "capacity"): - # store the existing capacity as a variable - existing_capacity = entry_key[0].capacity - # convert entry_key[0] to string - component_name_str = str(entry_key[0]) - # check if component_name_str is in capacities_df['Component'] - if any( - component_name_str in val for val in capacities_df["Component"].values - ): - # update the existing capacity value in capacities_df - capacities_df.loc[ - capacities_df["Component"] == component_name_str, - "Existing Capacity", - ] = existing_capacity - # check if the oemof object tuple has the 'expandable' attribute - if hasattr(entry_key[0], "expandable"): - # store the expandable boolean as a variable - expandable = entry_key[0].expandable - # convert entry_key[0] to string - expandable_name_str = str(entry_key[0]) - # check if expandable_name_str is in capacities_df['Component'] - if any( - expandable_name_str in val for val in capacities_df["Component"].values - ): - # update the existing expandable value in capacities_df - capacities_df.loc[ - capacities_df["Component"] == expandable_name_str, "Optimizable" - ] = expandable - # check if the oemof object tuple has the 'capacity_potential' attribute - if hasattr(entry_key[0], "capacity_potential"): - # store the capacity potential as a variable - capacity_potential = 
entry_key[0].capacity_potential - # convert entry_key[0] to string - cp_name_str = str(entry_key[0]) - # check if cp_name_str is in capacities_df['Component'] - if any(cp_name_str in val for val in capacities_df["Component"].values): - # update the existing expandable value in capacities_df - capacities_df.loc[ - capacities_df["Component"] == cp_name_str, "Capacity Potential" - ] = capacity_potential - # temporarily replace nan values with 0 in existing capacity and optimized capacity columns in order - # to calculate the total capacity - capacities_df["Total Capacity"] = capacities_df["Existing Capacity"].fillna( - 0 - ) + capacities_df["Optimized Capacity"].fillna(0) - - return capacities_df - - -def create_storage_capacities_table(all_scalars, results): - # ToDo: this function requires the naming of storage components to have 'storage' in them, there is - # probably a cleaner way of doing it - # ToDo: this function and the above are very similar can probably be combined after the multi-index dataframe - # is implemented - # NOTE: for storages, storage capacity is the capacity in e.g. MWh and capacity is the - # max input/output in e.g. MW - # NOTE: this has been made a separate function to above because storage components have both - # optimizable capacities (MW) and storage capacities (MWh) and it might be interesting to display all of - # this information to understand how the storage works, but there is probably a better way to do this - """ - Creates a DataFrame containing information regarding the storage component capacities from the oemof - model results. - :param all_scalars: all scalars multiindex dataframe (from oemof tabular) - :param results: oemof model results - :return: storage capacities dataframe - """ - # set columns of the capacities dataframe - storage_capacities_df = pd.DataFrame( - columns=[ - "Component", - "Type", - "Carrier", - "Existing Storage Capacity", - "Existing Max Input/Output", - "Storage Capacity Potential", - "Max Input/Output Potential", - "Optimizable", - "Optimized Storage Capacity", - "Optimized Max Input/Output", - "Total Storage Capacity", - ] - ) - # create an empty set for the component names - component_names = set() - # iterate over the index and row of the dataframe - for idx, row in all_scalars.iterrows(): - # store variables to be included in the dataframe - component_name = idx[0] - component_variable = idx[1] - component_type = row["type"] - component_carrier = row["carrier"] - # only include component names that haven't already been included to avoid repetition, - # and only include component names that have 'storage' in - if component_name not in component_names and "storage" in component_name: - component_names.add(component_name) - # add component name and corresponding type and carrier to the dataframe - storage_capacities_df = storage_capacities_df._append( - { - "Component": component_name, - "Type": component_type, - "Carrier": component_carrier, - }, - ignore_index=True, - ) - # check if 'invest' is equal to the component_variable - if "invest" == component_variable: - # if it is, get the corresponding 'var_value' for the optimized capacity value - component_opt_capacity = row["var_value"] - # if the value is -0.0, adapt this to 0.0 - if component_opt_capacity == -0.0: - component_opt_capacity = 0.0 - # add or update 'Optimized Storage Capacity' for the component_name with the optimized capacity value - storage_capacities_df.loc[ - storage_capacities_df["Component"] == component_name, - "Optimized Storage Capacity", - ] = 
component_opt_capacity - # check if 'invest_out' is in the component_variable - if "invest_out" in component_variable: - # if it is, get the corresponding 'var_value' for the optimized capacity value - component_opt_capacity = row["var_value"] - # if the value is -0.0, adapt this to 0.0 - if component_opt_capacity == -0.0: - component_opt_capacity = 0.0 - # add or update 'Optimized Max Input/Output' for the component_name with the optimized capacity value - storage_capacities_df.loc[ - storage_capacities_df["Component"] == component_name, - "Optimized Max Input/Output", - ] = component_opt_capacity - - # loop through the results dict - for entry_key, entry_value in results.items(): - # check if the oemof object tuple has the 'storage_capacity' attribute - if hasattr(entry_key[0], "storage_capacity"): - # store the existing capacity as a variable - existing_storage_capacity = entry_key[0].storage_capacity - # convert entry_key[0] to string - component_name_str = str(entry_key[0]) - # check if component_name_str is in storage_capacities_df['Component'] - if any( - component_name_str in val - for val in storage_capacities_df["Component"].values - ): - # update the existing capacity value in capacities_df - storage_capacities_df.loc[ - storage_capacities_df["Component"] == component_name_str, - "Existing Storage Capacity", - ] = existing_storage_capacity - # check if the oemof object tuple has the 'capacity' attribute - if hasattr(entry_key[0], "capacity"): - # store the existing capacity as a variable - existing_capacity = entry_key[0].capacity - # convert entry_key[0] to string - component_name_str = str(entry_key[0]) - # check if component_name_str is in storage_capacities_df['Component'] - if any( - component_name_str in val - for val in storage_capacities_df["Component"].values - ): - # update the existing capacity value in capacities_df - storage_capacities_df.loc[ - storage_capacities_df["Component"] == component_name_str, - "Existing Max Input/Output", - ] = existing_capacity - # check if the oemof object tuple has the 'expandable' attribute - if hasattr(entry_key[0], "expandable"): - # store the expandable boolean as a variable - expandable = entry_key[0].expandable - # convert entry_key[0] to string - expandable_name_str = str(entry_key[0]) - # Check if expandable_name_str is in storage_capacities_df['Component'] - if any( - expandable_name_str in val - for val in storage_capacities_df["Component"].values - ): - # Update the existing expandable value in storage_capacities_df - storage_capacities_df.loc[ - storage_capacities_df["Component"] == expandable_name_str, - "Optimizable", - ] = expandable - # check if the oemof object tuple has the 'storage_capacity_potential' attribute - if hasattr(entry_key[0], "storage_capacity_potential"): - # store the storage capacity potential as a variable - storage_capacity_potential = entry_key[0].storage_capacity_potential - # convert entry_key[0] to string - cp_name_str = str(entry_key[0]) - # check if cp_name_str is in storage_capacities_df['Component'] - if any( - cp_name_str in val for val in storage_capacities_df["Component"].values - ): - # update the existing expandable value in storage_capacities_df - storage_capacities_df.loc[ - storage_capacities_df["Component"] == cp_name_str, - "Storage Capacity Potential", - ] = storage_capacity_potential - # check if the oemof object tuple has the 'capacity_potential' attribute - if hasattr(entry_key[0], "capacity_potential"): - # store the capacity potential as a variable - capacity_potential = 
entry_key[0].capacity_potential - # convert entry_key[0] to string - cp_name_str = str(entry_key[0]) - # check if cp_name_str is in storage_capacities_df['Component'] - if any( - cp_name_str in val for val in storage_capacities_df["Component"].values - ): - # update the existing expandable value in storage_capacities_df - storage_capacities_df.loc[ - storage_capacities_df["Component"] == cp_name_str, - "Max Input/Output Potential", - ] = capacity_potential - - # temporarily replace nan values with 0 in existing capacity and optimized capacity columns in order - # to calculate the total capacity - storage_capacities_df["Total Storage Capacity"] = storage_capacities_df[ - "Existing Storage Capacity" - ].fillna(0) + storage_capacities_df["Optimized Storage Capacity"].fillna(0) - - return storage_capacities_df - - -def calculate_total_land_requirement(results, capacities_df, storage_capacities_df): - # ToDo: this parameter should only be displayed in the results if the parameters have been defined in the - # CSV input files - """ - Calculates the total land requirement needed for the energy system (existing, planned (fixed) and optimized capacities). - :param results: oemof model results - :param capacities_df: capacities dataframe - :param storage_capacities_df: storage capacities dataframe - :return: total land requirement value - """ - # initiate total land requirement value - total_land_requirement = 0 - # set boolean for finding land requirement parameter in any of the csv inputs - land_requirement_found = False - # convert capacities_df['Component'] column to a list - component_names = capacities_df["Component"].tolist() - # convert storage_capacities_df['Component'] column to a list - storage_component_names = storage_capacities_df["Component"].tolist() - # loop through the results dict - for entry_key, entry_value in results.items(): - component_name = entry_key[0] - # check if the oemof object tuple has the 'output_parameters' attribute - if hasattr(component_name, "output_parameters"): - # store the 'output_parameters' dict as output_param_dict - output_param_dict = component_name.output_parameters - # retrieve the 'land_requirement' value if it exists - # NOTE: this means that the user must define the land requirement as 'land_requirement' otherwise - # it won't get considered in the total land requirement value - land_requirement = output_param_dict.get("custom_attributes", {}).get( - "land_requirement" - ) - if land_requirement is not None and str(component_name) in component_names: - # set to True because a land requirement parameter has been found - land_requirement_found = True - # retrieve the total capacity from capacities_df and calculate total land requirement - total_capacity = capacities_df.loc[ - capacities_df["Component"] == str(component_name), "Total Capacity" - ].iloc[0] - component_land_requirement = land_requirement * total_capacity - total_land_requirement += component_land_requirement - # for if the component is a storage type (for now is treated seperately but this can change) - elif ( - land_requirement is not None - and str(component_name) in storage_component_names - ): - # set to True because a land requirement parameter has been found - land_requirement_found = True - # store the 'sequences' value for each oemof object tuple in results dict - sequences = entry_value.get("sequences", None) - # storage objects are saved twice in oemof results: one for storage content and one for flows, so - # this is to only store the land requirement once for each storage 
component - if ( - sequences is not None - and "flow" in sequences.columns.get_level_values("var_name") - ): - # retrieve the total capacity from storage_capacities_df and calculate total land requirement - total_storage_capacity = storage_capacities_df.loc[ - storage_capacities_df["Component"] == str(component_name), - "Total Storage Capacity", - ].iloc[0] - component_land_requirement = ( - land_requirement * total_storage_capacity - ) - total_land_requirement += component_land_requirement - # if the land requirement parameter is found in any input csv files, the value is stored and rounded to 2dp - if land_requirement_found is True: - total_land_requirement = round(total_land_requirement, 2) - # if the land requirement parameter is not found in any input csv files, the value is stored as None - else: - total_land_requirement = None - - return total_land_requirement - - -def create_aggregated_flows_table(aggregated_flows): - """ - Creates a dataframe based on the aggregated flows from/to each component. It uses the - aggregated flows series generated from oemof tabular and puts it into a more readable dataframe - :param aggregated_flows: aggregated flows series (from oemof tabular) - :return: aggregated flows dataframe - """ - # create an empty DataFrame to store the flows - flows_df = pd.DataFrame(columns=["From", "To", "Aggregated Flow"]) - - # iterate over the items of the Series - for idx, value in aggregated_flows.items(): - # extract the source, target, and var_name from the index - from_, to, _ = idx - - # append a row to the DataFrame - flows_df = flows_df._append( - {"From": from_, "To": to, "Aggregated Flow": float(value)}, - ignore_index=True, - ) - - return flows_df - - -def create_costs_table(all_scalars, results, capacities_df, storage_capacities_df): - # ToDo: make this function more concise and clear once multi-index dataframe is implemented. - """ - Creates a DataFrame containing information regarding the costs from the oemof model results. 
- :param all_scalars: all scalars multiindex dataframe (from oemof tabular) - :param results: oemof model results - :param capacities_df: capacities dataframe - :param storage_capacities_df: storage capacities dataframe - :return: costs dataframe - """ - # create an empty dataframe - costs_df = pd.DataFrame( - columns=[ - "Component", - "Upfront Investment Cost", - "Annuity (CAPEX + Fixed O&M)", - "Variable Costs (In)", - "Variable Costs (Out)", - ] - ) - # create an empty set for the component names - component_names = set() - # iterate over the index and row of the dataframe - for idx, row in all_scalars.iterrows(): - # store variables to be included in the dataframe - component_name = idx[0] - component_variable = idx[1] - # only include component names that haven't already been included to avoid repetition, - # and don't include system in the dataframe because this refers to the total system costs (include elsewhere) - # and don't include the storage components because these are stored in a different table - if ( - component_name not in component_names - and component_name != "system" - and "storage" not in component_name - ): - component_names.add(component_name) - # add component name and corresponding type and carrier to the dataframe - costs_df = costs_df._append( - {"Component": component_name}, ignore_index=True - ) - - # check if 'invest_costs_out' is in the component_variable - if "invest_costs_out" in component_variable: - # if it is, get the corresponding 'var_value' for the investment cost value - invest_costs_out = row["var_value"] - # if the value is -0.0, adapt this to 0.0 - if invest_costs_out == -0.0: - invest_costs_out = 0.0 - # add or update 'Annuity (CAPEX + Fixed O&M)' for the component_name with the optimized capacity value - costs_df.loc[ - costs_df["Component"] == component_name, "Annuity (CAPEX + Fixed O&M)" - ] = invest_costs_out - # check if 'variable_costs_in' is in the component_variable - if "variable_costs_in" in component_variable: - # if it is, get the corresponding 'var_value' for the variable cost in value - variable_costs_in = row["var_value"] - # if the value is -0.0, adapt this to 0.0 - if variable_costs_in == -0.0: - variable_costs_in = 0.0 - # add or update 'Variable Costs (In)' for the component_name with the optimized capacity value - costs_df.loc[ - costs_df["Component"] == component_name, "Variable Costs (In)" - ] = variable_costs_in - # check if 'variable_costs_out' is in the component_variable - if "variable_costs_out" in component_variable: - # if it is, get the corresponding 'var_value' for the variable cost out value - variable_costs_out = row["var_value"] - # if the value is -0.0, adapt this to 0.0 - if variable_costs_out == -0.0: - variable_costs_out = 0.0 - # add or update 'Variable Costs (Out)' for the component_name with the optimized capacity value - costs_df.loc[ - costs_df["Component"] == component_name, "Variable Costs (Out)" - ] = variable_costs_out - - # loop through the results dict - for entry_key, entry_value in results.items(): - # check if the oemof object tuple has the 'capex' attribute - if hasattr(entry_key[0], "capex"): - # store the existing capacity as a variable - specific_capex = entry_key[0].capex - # convert entry_key[0] to string - component_name_str = str(entry_key[0]) - # find the corresponding row in capacities_df for the component - capacities_row = capacities_df[ - capacities_df["Component"] == component_name_str - ] - # check if the component is a storage component - if "storage" in component_name_str: - # 
if it is, find the corresponding row in storage_capacities_df - storage_capacities_row = storage_capacities_df[ - storage_capacities_df["Component"] == component_name_str - ] - # calculate the total storage capacity by summing existing and optimized capacity - optimized_storage_capacity = storage_capacities_row[ - "Optimized Storage Capacity" - ].values[0] - # multiply the capex by the optimized storage capacity - upfront_investment_cost = specific_capex * optimized_storage_capacity - else: - # if it's not a storage component, calculate the total capacity by summing existing and - # optimized capacity - optimized_capacity = capacities_row["Optimized Capacity"].values[0] - # multiply the capex by the optimized capacity - upfront_investment_cost = specific_capex * optimized_capacity - - # add or update 'Upfront Investment Cost' for the component_name with the calculated value - costs_df.loc[ - costs_df["Component"] == component_name_str, "Upfront Investment Cost" - ] = upfront_investment_cost - - return costs_df - - # ------ New post-processing to create tables ------ # This dictionary contains groups of columns that should be extracted from the df_results to generate a clearer overview -# TODO add a column for planned capacity (not optimizable but including costs) in capacities if it gets properly implemented -# (planned capacity can be set by setting capacity_minimum == capacity_potential and dispatchable = True +# TODO add a column for planned capacity (not optimizable but including costs) in capacities if it gets properly +# implemented (planned capacity can be set by setting capacity_minimum == capacity_potential and dispatchable = True RESULT_TABLE_COLUMNS = { "costs": ["upfront_investment_costs", "total_annuity", "total_variable_costs"], "capacities": [ @@ -814,60 +169,4 @@ def post_processing(params, es, results_path, dp_path): filepath_name_all_sequences = os.path.join(results_path, "all_sequences.csv") all_sequences.sequences.to_csv(filepath_name_all_sequences) - capacities_df = create_capacities_table(all_scalars, results) - storage_capacities_df = create_storage_capacities_table(all_scalars, results) - flows_df = create_aggregated_flows_table(aggregated_flows) - costs_df = create_costs_table( - all_scalars, results, capacities_df, storage_capacities_df - ) - - # store the relevant KPI variables and their corresponding values - kpi_variables = [ - "specific_system_cost", - "renewable_share", - # "total_emissions", - "total_land_requirement", - ] - kpi_values = [ - calculate_specific_system_cost(all_scalars, total_system_costs), - calculate_renewable_share(results), - # calculate_total_emissions(results), - calculate_total_land_requirement(results, capacities_df, storage_capacities_df), - ] - # filter out None values - filtered_kpi_data = { - "Variable": [ - var for var, val in zip(kpi_variables, kpi_values) if val is not None - ], - "Value": [val for val in kpi_values if val is not None], - } - # create the DataFrame - kpi_df = pd.DataFrame(filtered_kpi_data) - excess_gen = excess_generation(all_scalars) - # add the excess generation values for each vector to the KPI DataFrame - for key, value in excess_gen.items(): - kpi_df = kpi_df._append({"Variable": key, "Value": value}, ignore_index=True) - # replace any parameters with '-' in the name with '_' for uniformity - kpi_df["Variable"] = kpi_df["Variable"].str.replace("-", "_") - # save all KPI results to a csv file - filepath_name_kpis = os.path.join(results_path, "kpis.csv") - # save the DataFrame to a CSV file - 
kpi_df.to_csv(filepath_name_kpis, index=False) - # save all capacities to a csv file - filepath_name_capacities = os.path.join(results_path, "capacities.csv") - # save the DataFrame to a CSV file - capacities_df.to_csv(filepath_name_capacities, index=False) - # save all storage capacities to a csv file - filepath_name_stor_capacities = os.path.join(results_path, "storage_capacities.csv") - # save the DataFrame to a CSV file - storage_capacities_df.to_csv(filepath_name_stor_capacities, index=False) - # save all flows to a csv file - filepath_name_flows = os.path.join(results_path, "flows.csv") - # save the DataFrame to a CSV file - flows_df.to_csv(filepath_name_flows, index=False) - # save all costs to a csv file - filepath_name_costs = os.path.join(results_path, "costs.csv") - # save the DataFrame to a CSV file - costs_df.to_csv(filepath_name_costs, index=False) - - return calculator + return From 4ef4fc6ed759a3a06d13f8b4b099e5b05b2fbfab Mon Sep 17 00:00:00 2001 From: "ciara.dunks" Date: Tue, 18 Jun 2024 10:57:46 +0200 Subject: [PATCH 17/20] Deleted print statements from MIMO converter --- src/oemof_tabular_plugins/wefe/facades/mimo.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/src/oemof_tabular_plugins/wefe/facades/mimo.py b/src/oemof_tabular_plugins/wefe/facades/mimo.py index ab26d450..fcaf1d98 100644 --- a/src/oemof_tabular_plugins/wefe/facades/mimo.py +++ b/src/oemof_tabular_plugins/wefe/facades/mimo.py @@ -11,7 +11,6 @@ def validate_mimo_datapackage(cls, resource): :param cls: class instance :param resource: the datapackage resource """ - print("resource in validation: ", resource) # check if the 'foreignKeys' field exists in the schema if ( "schema" in resource.descriptor @@ -19,9 +18,7 @@ def validate_mimo_datapackage(cls, resource): ): # loop through each foreign key for foreign_key in resource.descriptor["schema"]["foreignKeys"]: - print(foreign_key) if "primary" in foreign_key["fields"]: - print(foreign_key) # remove the foreign_key regarding 'primary' from the resource resource.descriptor["schema"]["foreignKeys"].remove(foreign_key) break From bf567e57b051a43bf7d5eaecd5de29132c7247de Mon Sep 17 00:00:00 2001 From: "ciara.dunks" Date: Wed, 19 Jun 2024 15:52:11 +0200 Subject: [PATCH 18/20] Quick fix for correcting cost results and including renewable share --- .../datapackage/post_processing.py | 303 +++++++++++++----- 1 file changed, 226 insertions(+), 77 deletions(-) diff --git a/src/oemof_tabular_plugins/datapackage/post_processing.py b/src/oemof_tabular_plugins/datapackage/post_processing.py index ff9b82d5..75f95e26 100644 --- a/src/oemof_tabular_plugins/datapackage/post_processing.py +++ b/src/oemof_tabular_plugins/datapackage/post_processing.py @@ -32,16 +32,27 @@ ] -def compute_total_capacity(results_df): - # ToDo: check for storage where there is both capacity and storage capacity +# Functions for results per component +def compute_capacity_total(results_df): + # ToDo: how it is here is that now total capacity is considering the storage capacity (in MWh) for storage. + # Check how the storage capacities should be displayed in the results to make it not confusing for the user. Maybe + # the storage components need two total capacity results (one for power and one for energy)? 
"""Calculates total capacity by adding existing capacity (capacity) to optimized capacity (investments)""" - return results_df.capacity + results_df.investments + if "storage" in results_df.name: + return results_df.storage_capacity + results_df.investments + else: + return results_df.capacity + results_df.investments -def compute_total_annuity(results_df): - """Calculates total capacity by adding existing capacity (capacity) to optimized capacity (investments)""" - # TODO fix this to use storage_capacity_cost for the storage (or fix on the storage side) - return results_df.capacity_cost + results_df.investments +def compute_annuity_total(results_df): + """Calculates total annuity by multiplying the annuity by the optimized capacity""" + # ToDo: now storage_capacity_cost is used for the annuity if the component is storage. + # Check that this is correctly applied for storage components or if two different costs should + # be calculated (one for power and one for energy) + if "storage" in results_df.name: + return results_df.storage_capacity_cost * results_df.investments + else: + return results_df.capacity_cost * results_df.investments def compute_upfront_investment_costs(results_df): @@ -91,7 +102,7 @@ def compute_co2_emissions(results_df): return results_df.aggregated_flow * results_df.emission_factor -def compute_additional_land_requirement(results_df): +def compute_land_requirement_additional(results_df): """Calculates land requirement needed for optimized capacities""" if "land_requirement_factor" not in results_df.index: return None @@ -99,12 +110,12 @@ def compute_additional_land_requirement(results_df): return results_df.investments * results_df.land_requirement_factor -def compute_total_land_requirement(results_df): +def compute_land_requirement_total(results_df): """Calculates land requirement needed for total capacities""" if "land_requirement_factor" not in results_df.index: return None else: - return results_df.total_capacity * results_df.land_requirement_factor + return results_df.capacity_total * results_df.land_requirement_factor def compute_water_footprint(results_df): @@ -115,51 +126,149 @@ def compute_water_footprint(results_df): return results_df.aggregated_flow * results_df.water_footprint_factor -def compute_total_system_cost(results_df): +# Functions for whole system results +def compute_system_annuity_total(results_df): + """Calculates system total annuity by summing the total annuity for each component""" + # ToDo: this method looks through each component and if it is mentioned twice, the annuity only + # gets considered once e.g. for storage, except for a MIMO because the costs only get considered once already. 
+ # This is a quick fix and I didn't have time to figure out how this should be done in the cleanest way + seen_components = set() + annuity_total = 0 + for index, row in results_df.iterrows(): + component_name = index[2] + # check if the component has been included before + if component_name not in seen_components: + annuity_value = row["annuity_total"] + if pd.isna(annuity_value): + annuity_value = 0 + annuity_total += annuity_value + # this is a quick fix to not include the MIMO converter because the asset type is 'nan' + # this should definitely be changed once implemented properly + if not pd.isna(index[4]): + seen_components.add(component_name) + return annuity_total + + +def compute_system_variable_costs_total(results_df): + """Calculates the total variable costs by summing the variable costs for each component flow""" + # This function has not been implemented the same as above because here we want to consider the variable + # costs attached to each flow instead of each component + variable_costs_total = results_df["variable_costs_total"].sum() + return variable_costs_total + + +def compute_system_cost_total(results_df): """Calculates the total system cost by summing the total annuity and total variable costs for each component""" - total_system_cost = ( - results_df["total_annuity"].sum() + results_df["total_variable_costs"].sum() - ) - return total_system_cost - - -def compute_total_upfront_investments(results_df): - """Calculates the total upfront investments by summing the upfront investments for each component""" - total_upfront_investments = results_df["upfront_investment_costs"].sum() - return total_upfront_investments - - -def compute_total_emissions(results_df): + # ToDo: quick fix - I didn't have time but rather than repeating the functions above, it would be good to calculate + # this from the kpis dataframe instead of results_df (then can use the annuity_total and variable_costs_total + # directly. To do this though, the apply_kpi_calculations function has to be adapted + seen_components = set() + annuity_total = 0 + for index, row in results_df.iterrows(): + component_name = index[2] + # check if the component has been included before + if component_name not in seen_components: + annuity_value = row["annuity_total"] + if pd.isna(annuity_value): + annuity_value = 0 + annuity_total += annuity_value + # this is a quick fix to not include the MIMO converter because the asset type is 'nan' + # this should definitely be changed once implemented properly + if not pd.isna(index[4]): + seen_components.add(component_name) + variable_costs_total = results_df["variable_costs_total"].sum() + system_cost_total = annuity_total + variable_costs_total + return system_cost_total + + +def compute_system_upfront_investments_total(results_df): """Calculates the total upfront investments by summing the upfront investments for each component""" - total_emissions = results_df["co2_emissions"].sum() - return total_emissions - - -def compute_system_additional_land_requirement(results_df): + # ToDo: this method looks through each component and if it is mentioned twice, the annuity only + # gets considered once e.g. for storage, except for a MIMO because the costs only get considered once already. 
+    # This is a quick fix and I didn't have time to figure out how this should be done in the cleanest way
+    seen_components = set()
+    upfront_investments_total = 0
+    for index, row in results_df.iterrows():
+        component_name = index[2]
+        # check if the component has been included before
+        if component_name not in seen_components:
+            upfront_investment = row["upfront_investment_costs"]
+            if pd.isna(upfront_investment):
+                upfront_investment = 0
+            upfront_investments_total += upfront_investment
+        # this is a quick fix to not include the MIMO converter because the asset type is 'nan'
+        # this should definitely be changed once implemented properly
+        if not pd.isna(index[4]):
+            seen_components.add(component_name)
+    return upfront_investments_total
+
+
+def compute_system_co2_emissions_total(results_df):
+    """Calculates the total CO2 emissions by summing up the CO2 emissions of each component flow"""
+    # ToDo: so far these are simply summed for each flow, but should check this is correct in every case
+    emissions_total = results_df["co2_emissions"].sum()
+    return emissions_total
+
+
+def compute_system_land_requirement_additional(results_df):
     """Calculates the additional land requirement from optimized capacities by summing the additional land requirement for each component"""
-    additional_land_requirement = results_df["additional_land_requirement"].sum()
-    return additional_land_requirement
-
-
-def compute_system_total_land_requirement(results_df):
+    # ToDo: this method looks through each component and if it is mentioned twice, the additional land requirement
+    # only gets considered once e.g. for storage, except for a MIMO because the costs only get considered once already.
+    # This is a quick fix and I didn't have time to figure out how this should be done in the cleanest way
+    seen_components = set()
+    add_land_requirement_total = 0
+    for index, row in results_df.iterrows():
+        component_name = index[2]
+        # check if the component has been included before
+        if component_name not in seen_components:
+            add_land_requirement = row["land_requirement_additional"]
+            if pd.isna(add_land_requirement):
+                add_land_requirement = 0
+            add_land_requirement_total += add_land_requirement
+        # this is a quick fix to not include the MIMO converter because the asset type is 'nan'
+        # this should definitely be changed once implemented properly
+        if not pd.isna(index[4]):
+            seen_components.add(component_name)
+    return add_land_requirement_total
+
+
+def compute_system_land_requirement_total(results_df):
     """Calculates the total land requirement by summing the total land requirement for each component"""
-    total_land_requirement = results_df["total_land_requirement"].sum()
-    return total_land_requirement
-
-
-def compute_total_water_footprint(results_df):
+    # ToDo: this method looks through each component and if it is mentioned twice, the land requirement only
+    # gets considered once e.g. for storage, except for a MIMO because the costs only get considered once already.
+ # This is a quick fix and I didn't have time to figure out how this should be done in the cleanest way + seen_components = set() + land_requirement_total = 0 + for index, row in results_df.iterrows(): + component_name = index[2] + # check if the component has been included before + if component_name not in seen_components: + land_requirement = row["land_requirement_total"] + if pd.isna(land_requirement): + land_requirement = 0 + land_requirement_total += land_requirement + # this is a quick fix to not include the MIMO converter because the asset type is 'nan' + # this should definitely be changed once implemented properly + if not pd.isna(index[4]): + seen_components.add(component_name) + return land_requirement_total + + +def compute_water_footprint_total(results_df): """Calculates the total water footprint by summing the total water footprint for each component""" - total_water_footprint = results_df["water_footprint"].sum() - return total_water_footprint + # ToDo: so far these are simply summed for each flow, but should check this is correct in every case + water_footprint_total = results_df["water_footprint"].sum() + return water_footprint_total def compute_specific_system_cost(results_df): """Calculates the total upfront investments by summing the upfront investments for each component""" # ToDo: will need to be adapted when non-energetic loads are included - for now only electricity is # considered but this is not correct - # ToDo: NEED TO CHANGE: somehow select only electricity components to calculate LCOE - discuss with Paula + # ToDo: need to decide how this should be calculated for energy systems with multiple carriers + # (both energetic and non-energetic) total_load = 0 total_system_cost = ( results_df["total_annuity"].sum() + results_df["total_variable_costs"].sum() @@ -174,6 +283,28 @@ def compute_specific_system_cost(results_df): return specific_system_cost +def compute_renewable_share(results_df): + """Calculates the renewable share based on the renewable generation of each flow and the + total aggregated flow of any component where the renewable factor is set (should be only set on sources) + """ + # ToDo: this might need to be reconsidered when the renewable share is set on a non-source component + # e.g. if the PV panel is a transformer component and the renewable share is on the output. 
It might still + # work but definitely needs to be checked + renewable_generation_total = 0 + generation_total = 0 + for index, row in results_df.iterrows(): + if not pd.isna(row["renewable_factor"]): + generation = row["aggregated_flow"] + renewable_generation = row["aggregated_flow"] * row["renewable_factor"] + generation_total += generation + renewable_generation_total += renewable_generation + if generation_total == 0: + renewable_share = 0 + else: + renewable_share = renewable_generation_total / generation_total + return renewable_share + + def _check_arguments(df, column_names, col_name): """Check that all required argument are present in the DataFrame columns""" for arg in column_names: @@ -186,15 +317,15 @@ def _check_arguments(df, column_names, col_name): # TODO turn the dict into a class simular to the one of Calculation of oemof.tabular CALCULATED_OUTPUTS = [ { - "column_name": "total_capacity", - "operation": compute_total_capacity, + "column_name": "capacity_total", + "operation": compute_capacity_total, "description": "The total capacity is calculated by adding the optimized capacity (investments) " "to the existing capacity (capacity)", "argument_names": ["investments", "capacity"], }, { - "column_name": "total_annuity", - "operation": compute_total_annuity, + "column_name": "annuity_total", + "operation": compute_annuity_total, "description": "Total annuity is calculated by multiplying the optimized capacity " "by the capacity cost (annuity considering CAPEX, OPEX and WACC)", "argument_names": ["investments", "capacity_cost"], @@ -207,14 +338,14 @@ def _check_arguments(df, column_names, col_name): "argument_names": ["investments", "capex"], }, { - "column_name": "total_opex_fix_costs", + "column_name": "opex_fix_costs_total", "operation": compute_opex_fix_costs, "description": "Operation and maintenance costs are calculated by multiplying the optimized capacity " "by the OPEX", - "argument_names": ["aggregated_flow", "marginal_cost", "carrier_cost"], + "argument_names": ["aggregated_flow", "opex_fix"], }, { - "column_name": "total_variable_costs", + "column_name": "variable_costs_total", "operation": compute_variable_costs, "description": "Variable costs are calculated by multiplying the total flow " "by the marginal/carrier costs", @@ -237,16 +368,16 @@ def _check_arguments(df, column_names, col_name): "argument_names": ["aggregated_flow", "emission_factor"], }, { - "column_name": "additional_land_requirement", - "operation": compute_additional_land_requirement, + "column_name": "land_requirement_additional", + "operation": compute_land_requirement_additional, "description": "The additional land requirement calculates the land required for the optimized capacities", "argument_names": ["investments", "land_requirement_factor"], }, { - "column_name": "total_land_requirement", - "operation": compute_total_land_requirement, + "column_name": "land_requirement_total", + "operation": compute_land_requirement_total, "description": "The total land requirement calculates the land required for the total capacities", - "argument_names": ["total_capacity", "land_requirement_factor"], + "argument_names": ["capacity_total", "land_requirement_factor"], }, { "column_name": "water_footprint", @@ -262,17 +393,27 @@ def _check_arguments(df, column_names, col_name): # Probably this should be included with the other CALCULATED_OUTPUTS eventually, but should ask PF CALCULATED_KPIS = [ { - "column_name": "total_system_cost", - "operation": compute_total_system_cost, - "description": "The total 
system cost is calculated by summing up the total annuity (CAPEX and OPEX fix " - "multipled by the optimized capacity) and the total variable costs (including carrier and" - "marginal costs) for each component and then summing the values up to get the total value" - "for the system", - "argument_names": ["total_annuity", "total_variable_costs"], + "column_name": "annuity_total", + "operation": compute_system_annuity_total, + "description": "The system total annuity is calculated by summing up the total annuity for each component", + "argument_names": ["annuity_total"], + }, + { + "column_name": "variable_costs_total", + "operation": compute_system_variable_costs_total, + "description": "The system total variable costs is calculated by summing up the total variable costs for " + "each component flow", + "argument_names": ["variable_costs_total"], + }, + { + "column_name": "system_cost_total", + "operation": compute_system_cost_total, + "description": "The total system cost is calculated by adding the total annuity to the total variable costs", + "argument_names": ["annuity_total", "variable_costs_total"], }, { "column_name": "total_upfront_investments", - "operation": compute_total_upfront_investments, + "operation": compute_system_upfront_investments_total, "description": "The total upfront investments value is calculated by summing the upfront investment" "costs for each component", "argument_names": ["upfront_investment_costs"], @@ -284,33 +425,40 @@ def _check_arguments(df, column_names, col_name): "argument_names": ["aggregated_flow", "total_annuity", "total_variable_costs"], }, { - "column_name": "total_emissions", - "operation": compute_total_emissions, + "column_name": "co2_emissions_total", + "operation": compute_system_co2_emissions_total, "description": "The total emissions is calculated by summing the c02 emissions " "for each component", "argument_names": ["co2_emissions"], }, { - "column_name": "additional_land_requirement", - "operation": compute_system_additional_land_requirement, + "column_name": "land_requirement_additional", + "operation": compute_system_land_requirement_additional, "description": "The total additional land requirement is calculated by summing the additional land requirement " "for each component", - "argument_names": ["additional_land_requirement"], + "argument_names": ["land_requirement_additional"], }, { - "column_name": "total_land_requirement", - "operation": compute_system_total_land_requirement, + "column_name": "land_requirement_total", + "operation": compute_system_land_requirement_total, "description": "The total land requirement is calculated by summing the total land requirement " "for each component", - "argument_names": ["total_land_requirement"], + "argument_names": ["land_requirement_total"], }, { "column_name": "total_water_footprint", - "operation": compute_total_water_footprint, + "operation": compute_water_footprint_total, "description": "The total water footprint is calculated by summing the water footprint required " "for each component", "argument_names": ["water_footprint"], }, + { + "column_name": "renewable_share", + "operation": compute_renewable_share, + "description": "The renewable share is calculated by dividing the renewable generation by the total " + "generation", + "argument_names": ["renewable_factor", "aggregated_flow"], + }, ] # Add docstrings from function handles for documentation purposes @@ -634,12 +782,13 @@ def apply_calculations(results_df, calculations=CALCULATED_OUTPUTS): func_handle, axis=1, ) - # check if 
the new column contains all None values and remove it if so - if results_df[var_name].isna().all(): - results_df.drop(columns=[var_name], inplace=True) - logging.info( - f"Removed column '{var_name}' because it contains all None values." - ) + # ToDo: I've commented this out for now but decide if this or some form should be kept in + # # check if the new column contains all None values and remove it if so + # if results_df[var_name].isna().all(): + # results_df.drop(columns=[var_name], inplace=True) + # logging.info( + # f"Removed column '{var_name}' because it contains all None values." + # ) def apply_kpi_calculations(results_df, calculations=CALCULATED_KPIS): From dee9608feb722c1e46fa39187f244decc7085d59 Mon Sep 17 00:00:00 2001 From: "ciara.dunks" Date: Thu, 20 Jun 2024 12:38:32 +0200 Subject: [PATCH 19/20] Add oemof-tabular dev and oemof-industry saltwater requirement --- requirements/build_requirements.txt | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/requirements/build_requirements.txt b/requirements/build_requirements.txt index 2f99ad1e..c508c637 100644 --- a/requirements/build_requirements.txt +++ b/requirements/build_requirements.txt @@ -1,2 +1,4 @@ # Right now this is a problem as oemof.tabular is not installable from releases because of requirements conflictspytho -oemof-tabular +numpy==1.26.0 +oemof.tabular @ git+https://github.com/oemof/oemof-tabular.git@dev +oemof-industry @ git+https://github.com/sedos-project/oemof.industry.git@saltwater From 60b5b3985bf7c86b19c592e094ac3b92b1a65138 Mon Sep 17 00:00:00 2001 From: "pierre-francois.duc" Date: Mon, 24 Jun 2024 14:45:32 +0200 Subject: [PATCH 20/20] Code cosmetics --- examples/scripts/compute.py | 12 ++++++++++-- .../datapackage/post_processing.py | 15 +-------------- tests/test_pre_processing.py | 2 -- 3 files changed, 11 insertions(+), 18 deletions(-) diff --git a/examples/scripts/compute.py b/examples/scripts/compute.py index 40403438..4cf8fe32 100644 --- a/examples/scripts/compute.py +++ b/examples/scripts/compute.py @@ -26,7 +26,15 @@ # -------------- USER INPUTS -------------- # list of scenarios to be evaluated -scenarios = ["cp_nigeria_usungwe_example"] +scenarios = [ + "general_add_cost_inputs", + "general_basic", + "general_constraints", + "general_custom_attributes", + "wefe_custom_attributes", + "wefe_pv_panel", + "wefe_reverse_osmosis", +] # weighted average cost of capital (WACC) - might move later # this parameter is needed if CAPEX, OPEX fix and lifetime are included wacc = 0.06 @@ -71,6 +79,7 @@ attributemap={}, typemap=TYPEMAP, ) + logger.info("Energy system created from datapackage") # create model from energy system (this is just oemof.solph) @@ -92,7 +101,6 @@ # extract parameters and results params = parameter_as_dict(es) - results = m.results() es.results = processing.results(m) post_processing( diff --git a/src/oemof_tabular_plugins/datapackage/post_processing.py b/src/oemof_tabular_plugins/datapackage/post_processing.py index 75f95e26..562f15ed 100644 --- a/src/oemof_tabular_plugins/datapackage/post_processing.py +++ b/src/oemof_tabular_plugins/datapackage/post_processing.py @@ -277,7 +277,6 @@ def compute_specific_system_cost(results_df): # This is a quick fix to not include water - need to talk to Julian about how other demands should # be considered if index[4] == "load" and index[3] == "electricity": - print("asset type in index") total_load += row.get("aggregated_flow", 0) specific_system_cost = total_system_cost / total_load return specific_system_cost @@ -462,15 +461,7 @@ 
def _check_arguments(df, column_names, col_name): ] # Add docstrings from function handles for documentation purposes -for calc in CALCULATED_OUTPUTS: - func_handle = calc.get("operation", None) - if callable(func_handle): - calc["docstring"] = func_handle.__doc__ - else: - calc["docstring"] = "" - -# Add docstrings from function handles for documentation purposes -for calc in CALCULATED_KPIS: +for calc in CALCULATED_OUTPUTS + CALCULATED_KPIS: func_handle = calc.get("operation", None) if callable(func_handle): calc["docstring"] = func_handle.__doc__ @@ -519,10 +510,6 @@ def infer_busses_carrier(energy_system): bus_label = getattr(node, attribute).label if bus_label in busses_carrier: if busses_carrier[bus_label] != node.carrier: - print( - "busses carrier[bus label]", busses_carrier[bus_label] - ) - print("node.carrier: ", node.carrier) raise ValueError( f"Two different carriers ({busses_carrier[bus_label]}, {node.carrier}) are associated to the same bus '{bus_label}'" ) diff --git a/tests/test_pre_processing.py b/tests/test_pre_processing.py index 159e9d4f..c758ab79 100644 --- a/tests/test_pre_processing.py +++ b/tests/test_pre_processing.py @@ -282,7 +282,6 @@ def test_no_annuity_all_cost_params_defined_creates_annuity_param(self): pre_processing(self.pre_p_dir, wacc=wacc) # check that the 'capacity_cost' parameter is added to the scenario csv file df = pd.read_csv(os.path.join(self.package_path, f_name), delimiter=";") - print(df.columns) assert "capacity_cost" in df.columns def test_no_annuity_all_cost_params_defined_calculates_annuity(self): @@ -420,7 +419,6 @@ def test_output_params_added_to_csv_with_cust_attr_in_csv_and_list(self): ) # read the updated csv file updated_df = pd.read_csv(os.path.join(self.package_path, f_name), sep=";") - print(updated_df.columns) # assert that 'output_parameters' column is in the updated dataframe assert "output_parameters" in updated_df.columns, ( "'output_parameters' column is not present " "in the updated dataframe"