Skip to content

Commit

Permalink
Black reformatting
Browse files Browse the repository at this point in the history
  • Loading branch information
romainsacchi committed Sep 26, 2023
1 parent 0fe87b8 commit f2de32d
Show file tree
Hide file tree
Showing 7 changed files with 357 additions and 90 deletions.
56 changes: 46 additions & 10 deletions premise/data_collection.py
Original file line number Diff line number Diff line change
Expand Up @@ -146,7 +146,11 @@ def get_gains_EU_data() -> xr.DataArray:
)
gains_emi_EU["sector"] = gains_emi_EU["Sector"] + gains_emi_EU["Activity"]
gains_emi_EU.drop(
["Sector", "Activity",], axis=1,
[
"Sector",
"Activity",
],
axis=1,
)

gains_emi_EU = gains_emi_EU[~gains_emi_EU["value"].isna()]
Expand Down Expand Up @@ -225,15 +229,19 @@ def fix_efficiencies(data: xr.DataArray, min_year: int) -> xr.DataArray:
# we correct it to 1, as we do not accept
# that efficiency degrades over time
data.loc[dict(year=[y for y in data.year.values if y > 2020])] = np.clip(
data.loc[dict(year=[y for y in data.year.values if y > 2020])], 1, None,
data.loc[dict(year=[y for y in data.year.values if y > 2020])],
1,
None,
)

# Inversely, if we are looking at a year prior to 2020
# and the ratio in efficiency change is superior to 1
# we correct it to 1, as we do not accept
# that efficiency in the past was higher than now
data.loc[dict(year=[y for y in data.year.values if y < 2020])] = np.clip(
data.loc[dict(year=[y for y in data.year.values if y < 2020])], None, 1,
data.loc[dict(year=[y for y in data.year.values if y < 2020])],
None,
1,
)

# ensure that efficiency can not decrease over time
Expand Down Expand Up @@ -390,7 +398,9 @@ def __init__(
new_vars = flatten(new_vars)

data = self.__get_iam_data(
key=key, filedir=filepath_iam_files, variables=new_vars,
key=key,
filedir=filepath_iam_files,
variables=new_vars,
)

self.regions = data.region.values.tolist()
Expand All @@ -402,7 +412,9 @@ def __init__(
)

self.electricity_markets = self.__fetch_market_data(
data=data, input_vars=electricity_prod_vars, system_model=self.system_model,
data=data,
input_vars=electricity_prod_vars,
system_model=self.system_model,
)

self.petrol_markets = self.__fetch_market_data(
Expand All @@ -427,7 +439,12 @@ def __init__(
input_vars={
k: v
for k, v in fuel_prod_vars.items()
if any(x in k for x in ["diesel",])
if any(
x in k
for x in [
"diesel",
]
)
},
system_model=self.system_model,
)
Expand All @@ -454,7 +471,12 @@ def __init__(
input_vars={
k: v
for k, v in fuel_prod_vars.items()
if any(x in k for x in ["hydrogen",])
if any(
x in k
for x in [
"hydrogen",
]
)
},
system_model=self.system_model,
)
Expand Down Expand Up @@ -511,7 +533,12 @@ def __init__(
efficiency_labels={
k: v
for k, v in fuel_eff_vars.items()
if any(x in k for x in ["diesel",])
if any(
x in k
for x in [
"diesel",
]
)
},
)
self.gas_efficiencies = self.get_iam_efficiencies(
Expand All @@ -527,7 +554,12 @@ def __init__(
efficiency_labels={
k: v
for k, v in fuel_eff_vars.items()
if any(x in k for x in ["hydrogen",])
if any(
x in k
for x in [
"hydrogen",
]
)
},
)

Expand Down Expand Up @@ -1230,7 +1262,11 @@ def fetch_external_data_coal_power_plants(self):
df = df.drop(columns=["fuel input"])
array = (
df.melt(
id_vars=["country", "CHP", "fuel",],
id_vars=[
"country",
"CHP",
"fuel",
],
var_name="variable",
value_name="value",
)
Expand Down
45 changes: 37 additions & 8 deletions premise/ecoinvent_modification.py
Original file line number Diff line number Diff line change
Expand Up @@ -732,12 +732,18 @@ def __import_inventories(self, keep_uncertainty_data: bool = False) -> List[dict
(FILEPATH_SYNGAS_FROM_COAL_INVENTORIES, "3.7"),
(FILEPATH_BIOFUEL_INVENTORIES, "3.7"),
(FILEPATH_SYNFUEL_INVENTORIES, "3.7"),
(FILEPATH_SYNFUEL_FROM_FT_FROM_WOOD_GASIFICATION_INVENTORIES, "3.7",),
(
FILEPATH_SYNFUEL_FROM_FT_FROM_WOOD_GASIFICATION_INVENTORIES,
"3.7",
),
(
FILEPATH_SYNFUEL_FROM_FT_FROM_WOOD_GASIFICATION_WITH_CCS_INVENTORIES,
"3.7",
),
(FILEPATH_SYNFUEL_FROM_FT_FROM_COAL_GASIFICATION_INVENTORIES, "3.7",),
(
FILEPATH_SYNFUEL_FROM_FT_FROM_COAL_GASIFICATION_INVENTORIES,
"3.7",
),
(
FILEPATH_SYNFUEL_FROM_FT_FROM_COAL_GASIFICATION_WITH_CCS_INVENTORIES,
"3.7",
Expand Down Expand Up @@ -856,7 +862,12 @@ def update_dac(self) -> None:
# use multiprocessing to speed up the process
with ProcessPool(processes=multiprocessing.cpu_count()) as pool:
args = [
(scenario, self.version, self.system_model, self.modified_datasets,)
(
scenario,
self.version,
self.system_model,
self.modified_datasets,
)
for scenario in self.scenarios
]
results = pool.starmap(_update_dac, args)
Expand All @@ -877,7 +888,12 @@ def update_fuels(self) -> None:
# use multiprocessing to speed up the process
with ProcessPool(processes=multiprocessing.cpu_count()) as pool:
args = [
(scenario, self.version, self.system_model, self.modified_datasets,)
(
scenario,
self.version,
self.system_model,
self.modified_datasets,
)
for scenario in self.scenarios
]
results = pool.starmap(_update_fuels, args)
Expand All @@ -898,7 +914,12 @@ def update_cement(self) -> None:
# use multiprocessing to speed up the process
with ProcessPool(processes=multiprocessing.cpu_count()) as pool:
args = [
(scenario, self.version, self.system_model, self.modified_datasets,)
(
scenario,
self.version,
self.system_model,
self.modified_datasets,
)
for scenario in self.scenarios
]
results = pool.starmap(_update_cement, args)
Expand All @@ -919,7 +940,12 @@ def update_steel(self) -> None:
# use multiprocessing to speed up the process
with ProcessPool(processes=multiprocessing.cpu_count()) as pool:
args = [
(scenario, self.version, self.system_model, self.modified_datasets,)
(
scenario,
self.version,
self.system_model,
self.modified_datasets,
)
for scenario in self.scenarios
]
results = pool.starmap(_update_steel, args)
Expand Down Expand Up @@ -1200,7 +1226,9 @@ def write_superstructure_db_to_brightway(
)

write_brightway2_database(
data=self.database, name=name, reset_codes=True,
data=self.database,
name=name,
reset_codes=True,
)

# generate scenario report
Expand Down Expand Up @@ -1268,7 +1296,8 @@ def write_db_to_brightway(self, name: [str, List[str]] = None):

for scen, scenario in enumerate(self.scenarios):
write_brightway2_database(
scenario["database"], name[scen],
scenario["database"],
name[scen],
)
# generate scenario report
self.generate_scenario_report()
Expand Down
45 changes: 37 additions & 8 deletions premise/export.py
Original file line number Diff line number Diff line change
Expand Up @@ -310,7 +310,12 @@ def create_codes_and_names_of_tech_matrix(database: List[dict]):
:rtype: dict
"""
return {
(i["name"], i["reference product"], i["unit"], i["location"],): i["code"]
(
i["name"],
i["reference product"],
i["unit"],
i["location"],
): i["code"]
for i in database
}

Expand All @@ -332,7 +337,10 @@ def biosphere_flows_dictionary(version):
csv_dict = {}

with open(fp, encoding="utf-8") as file:
input_dict = csv.reader(file, delimiter=get_delimiter(filepath=fp),)
input_dict = csv.reader(
file,
delimiter=get_delimiter(filepath=fp),
)
for row in input_dict:
csv_dict[(row[0], row[1], row[2], row[3])] = row[-1]

Expand Down Expand Up @@ -534,7 +542,9 @@ def build_datapackage(df, inventories, list_scenarios, ei_version, name):
"version": ei_version,
"type": "source",
},
{"name": "biosphere3",},
{
"name": "biosphere3",
},
]
package.descriptor["scenarios"] = [
{
Expand Down Expand Up @@ -1022,7 +1032,10 @@ class Export:
"""

def __init__(
self, scenario: dict = None, filepath: Path = None, version: str = None,
self,
scenario: dict = None,
filepath: Path = None,
version: str = None,
):
self.db = scenario["database"]
self.model = scenario["model"]
Expand Down Expand Up @@ -1131,15 +1144,23 @@ def export_db_to_matrices(self):

# Export A matrix
with open(self.filepath / "A_matrix.csv", "w", encoding="utf-8") as file:
writer = csv.writer(file, delimiter=";", lineterminator="\n",)
writer = csv.writer(
file,
delimiter=";",
lineterminator="\n",
)
writer.writerow(["index of activity", "index of product", "value"])
rows = self.create_A_matrix_coordinates()
for row in rows:
writer.writerow(row)

# Export A index
with open(self.filepath / "A_matrix_index.csv", "w", encoding="utf-8") as file:
writer = csv.writer(file, delimiter=";", lineterminator="\n",)
writer = csv.writer(
file,
delimiter=";",
lineterminator="\n",
)
index_A = create_index_of_A_matrix(self.db)
for d in index_A:
data = list(d) + [index_A[d]]
Expand All @@ -1149,15 +1170,23 @@ def export_db_to_matrices(self):

# Export B matrix
with open(self.filepath / "B_matrix.csv", "w", encoding="utf-8") as file:
writer = csv.writer(file, delimiter=";", lineterminator="\n",)
writer = csv.writer(
file,
delimiter=";",
lineterminator="\n",
)
writer.writerow(["index of activity", "index of biosphere flow", "value"])
rows = self.create_B_matrix_coordinates()
for row in rows:
writer.writerow(row)

# Export B index
with open(self.filepath / "B_matrix_index.csv", "w", encoding="utf-8") as file:
writer = csv.writer(file, delimiter=";", lineterminator="\n",)
writer = csv.writer(
file,
delimiter=";",
lineterminator="\n",
)
for d in index_B:
data = list(d) + [index_B[d]]
writer.writerow(data)
Expand Down
30 changes: 23 additions & 7 deletions premise/inventory_imports.py
Original file line number Diff line number Diff line change
Expand Up @@ -60,7 +60,10 @@ def get_biosphere_code(version) -> dict:
csv_dict = {}

with open(fp, encoding="utf-8") as file:
input_dict = csv.reader(file, delimiter=get_delimiter(filepath=fp),)
input_dict = csv.reader(
file,
delimiter=get_delimiter(filepath=fp),
)
for row in input_dict:
csv_dict[(row[0], row[1], row[2], row[3])] = row[4]

Expand All @@ -87,7 +90,8 @@ def generate_migration_maps(origin: str, destination: str) -> Dict[str, list]:

with open(FILEPATH_MIGRATION_MAP, "r", encoding="utf-8") as read_obj:
csv_reader = csv.reader(
read_obj, delimiter=get_delimiter(filepath=FILEPATH_MIGRATION_MAP),
read_obj,
delimiter=get_delimiter(filepath=FILEPATH_MIGRATION_MAP),
)
next(csv_reader)
for row in csv_reader:
Expand Down Expand Up @@ -507,7 +511,15 @@ def correct_product_field(self, exc: tuple) -> [str, None]:
return candidate["reference product"]

self.list_unlinked.append(
(exc[0], exc[-1], exc[1], None, exc[2], "technosphere", self.path.name,)
(
exc[0],
exc[-1],
exc[1],
None,
exc[2],
"technosphere",
self.path.name,
)
)

return None
Expand Down Expand Up @@ -680,8 +692,10 @@ def prepare_inventory(self) -> None:
)

if self.system_model == "consequential":
self.import_db.data = check_for_datasets_compliance_with_consequential_database(
self.import_db.data, self.consequential_blacklist
self.import_db.data = (
check_for_datasets_compliance_with_consequential_database(
self.import_db.data, self.consequential_blacklist
)
)

self.import_db.data = remove_categories(self.import_db.data)
Expand Down Expand Up @@ -831,8 +845,10 @@ def prepare_inventory(self):
)

if self.system_model == "consequential":
self.import_db.data = check_for_datasets_compliance_with_consequential_database(
self.import_db.data, self.consequential_blacklist
self.import_db.data = (
check_for_datasets_compliance_with_consequential_database(
self.import_db.data, self.consequential_blacklist
)
)

self.import_db.data = remove_categories(self.import_db.data)
Expand Down
Loading

0 comments on commit f2de32d

Please sign in to comment.