Skip to content

Commit

Permalink
Fix some merge issues
Browse files Browse the repository at this point in the history
  • Loading branch information
romainsacchi committed Nov 3, 2023
1 parent 17554a9 commit 38d84a3
Show file tree
Hide file tree
Showing 4 changed files with 94 additions and 5 deletions.
18 changes: 18 additions & 0 deletions premise/activity_maps.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,8 @@
GAINS_MAPPING = (
DATA_DIR / "GAINS_emission_factors" / "gains_ecoinvent_sectoral_mapping.yaml"
)
ACTIVITIES_METALS_MAPPING = DATA_DIR / "metals" / "activities_mapping.yml"
METALS_MAPPING = DATA_DIR / "metals" / "metals_mapping.yml"


def get_mapping(filepath: Path, var: str, model: str = None) -> dict:
Expand All @@ -47,6 +49,7 @@ def get_mapping(filepath: Path, var: str, model: str = None) -> dict:
return mapping



def act_fltr(
database: List[dict],
fltr: Union[str, List[str]] = None,
Expand Down Expand Up @@ -154,6 +157,21 @@ def __init__(
filepath=GAINS_MAPPING, var="ecoinvent_aliases"
)

self.activity_metals_filters = get_mapping(
filepath=ACTIVITIES_METALS_MAPPING, var="ecoinvent_aliases"
)
self.metals_filters = get_mapping(
filepath=METALS_MAPPING, var="ecoinvent_aliases"
)

def generate_activities_using_metals_map(self) -> dict:
    """
    Collect ecoinvent processes that relate to metals.

    :return: dictionary mapping each metal name to the set of
        ecoinvent activity names associated with it.
    """
    metal_filters = self.activity_metals_filters
    return self.generate_sets_from_filters(metal_filters)

def generate_gains_mapping_IAM(self, mapping):
EU_to_IAM_var = get_mapping(filepath=GAINS_MAPPING, var="gains_aliases_IAM")
new_map = defaultdict(set)
Expand Down
55 changes: 55 additions & 0 deletions premise/ecoinvent_modification.py
Original file line number Diff line number Diff line change
Expand Up @@ -40,6 +40,7 @@
from .inventory_imports import AdditionalInventory, DefaultInventory
from .report import generate_change_report, generate_summary_report
from .steel import _update_steel
from .metals import Metals, _update_metals
from .transport import _update_vehicles
from .utils import (
clear_existing_cache,
Expand Down Expand Up @@ -154,6 +155,9 @@
FILEPATH_VANADIUM = INVENTORY_DIR / "lci-vanadium.xlsx"
FILEPATH_VANADIUM_REDOX_BATTERY = INVENTORY_DIR / "lci-vanadium-redox-flow-battery.xlsx"
FILEPATH_HYDROGEN_TURBINE = INVENTORY_DIR / "lci-hydrogen-turbine.xlsx"
FILEPATH_GERMANIUM = INVENTORY_DIR / "lci-germanium.xlsx"
FILEPATH_RHENIUM = INVENTORY_DIR / "lci-rhenium.xlsx"
FILEPATH_PGM = INVENTORY_DIR / "lci-PGM.xlsx"

config = load_constants()

Expand Down Expand Up @@ -778,6 +782,9 @@ def __import_inventories(self, keep_uncertainty_data: bool = False) -> List[dict
(FILEPATH_CSP, "3.9"),
(FILEPATH_VANADIUM, "3.8"),
(FILEPATH_VANADIUM_REDOX_BATTERY, "3.9"),
(FILEPATH_GERMANIUM, "3.9"),
(FILEPATH_RHENIUM, "3.9"),
(FILEPATH_PGM, "3.8"),
]
for filepath in filepaths:
# make an exception for FILEPATH_OIL_GAS_INVENTORIES
Expand Down Expand Up @@ -1045,6 +1052,54 @@ def update_steel(self) -> None:

print("Done!\n")

def update_metals(self) -> None:
    """
    Update the metals use in inventories with the data from DLR.

    Scenarios whose ``exclude`` list contains ``"update_metals"`` are
    left untouched.  NOTE(review): the original code only honored that
    exclusion in the sequential path; the multiprocessing path now
    applies the same filter so both branches behave identically.
    """

    print("\n////////////////////////////// METALS ///////////////////////////////")

    def _wants_update(scenario: dict) -> bool:
        # True when this scenario has not opted out of the metals update.
        return "exclude" not in scenario or "update_metals" not in scenario["exclude"]

    # use multiprocessing to speed up the process
    if self.multiprocessing:
        # Only dispatch scenarios that are not excluded, remembering their
        # positions so each result is written back to the right slot.
        selected = [
            (idx, scenario)
            for idx, scenario in enumerate(self.scenarios)
            if _wants_update(scenario)
        ]
        with ProcessPool(processes=multiprocessing.cpu_count()) as pool:
            args = [
                (
                    scenario,
                    self.version,
                    self.system_model,
                    self.modified_datasets,
                )
                for _, scenario in selected
            ]
            results = pool.starmap(_update_metals, args)

        for (idx, scenario), result in zip(selected, results):
            # result[0] is the updated scenario; result[1] carries the
            # modified-datasets entries keyed by (model, pathway, year).
            self.scenarios[idx] = result[0]
            key = (scenario["model"], scenario["pathway"], scenario["year"])
            self.modified_datasets[key] = result[1][key]
    else:
        for scenario in self.scenarios:
            if _wants_update(scenario):
                metals = Metals(
                    database=scenario["database"],
                    year=scenario["year"],
                    model=scenario["model"],
                    pathway=scenario["pathway"],
                    iam_data=scenario["iam data"],
                    version=self.version,
                    system_model=self.system_model,
                    modified_datasets=self.modified_datasets,
                )

                metals.create_metal_markets()
                scenario["database"] = metals.database

    print("Done!\n")

def update_cars(self) -> None:
"""
This method will update the cars inventories
Expand Down
1 change: 0 additions & 1 deletion premise/metals.py
Original file line number Diff line number Diff line change
Expand Up @@ -359,7 +359,6 @@ def create_new_mining_activity(
ref_prod=reference_product,
regions=new_locations.values(),
# geo_mapping=geo_mapping,
exact_match=True,
)

return datasets
Expand Down
25 changes: 21 additions & 4 deletions premise/transformation.py
Original file line number Diff line number Diff line change
Expand Up @@ -563,6 +563,7 @@ def fetch_proxies(
regions=None,
delete_original_dataset=False,
empty_original_activity=True,
exact_match=True,
) -> Dict[str, dict]:
"""
Fetch dataset proxies, given a dataset `name` and `reference product`.
Expand Down Expand Up @@ -591,18 +592,34 @@ def fetch_proxies(
ds_name, ds_ref_prod = [None, None]

for region in d_iam_to_eco:

# build filters
if exact_match is True:
filters = [
ws.equals("name", name),
ws.equals("reference product", ref_prod),
]
else:
filters = [
ws.equals("name", name),
ws.contains("reference product", ref_prod),
]
filters.append(ws.equals("location", d_iam_to_eco[region]))

try:
dataset = ws.get_one(
self.database,
ws.equals("name", name),
ws.contains("reference product", ref_prod),
ws.equals("location", d_iam_to_eco[region]),
*filters,
)
except ws.MultipleResults as err:
results = ws.get_many(
self.database,
*filters,
)
print(
err,
"A single dataset was expected, "
f"but found more than one for: {name, ref_prod}",
f"but found more than one for: {name, ref_prod}, : {[(r['name'], r['reference product'], r['location']) for r in results]}",
)

if (name, ref_prod, region, dataset["unit"]) not in self.modified_datasets[
Expand Down

0 comments on commit 38d84a3

Please sign in to comment.