From e6691a33557a02384234922e448ad290a3be6b12 Mon Sep 17 00:00:00 2001
From: romainsacchi
Date: Sun, 5 Nov 2023 15:27:32 +0100
Subject: [PATCH] Implement transport in markets

---
 .../corrections.yaml      | 130 ++++++++++++++++-
 premise/metals.py         | 101 +++++++------
 premise/transformation.py | 137 +++++++++++-------
 3 files changed, 276 insertions(+), 92 deletions(-)

diff --git a/premise/data/metals/post-allocation correction/corrections.yaml b/premise/data/metals/post-allocation correction/corrections.yaml
index c5a329ca..0a72cda4 100644
--- a/premise/data/metals/post-allocation correction/corrections.yaml
+++ b/premise/data/metals/post-allocation correction/corrections.yaml
@@ -88,6 +88,7 @@
     categories: natural resource::in ground
     amount: 0.9288
     unit: kilogram
+
 - name: platinum group metal, extraction and refinery operations
   reference product: gold
   location: ZA
@@ -96,4 +97,131 @@
     name: Gold
     categories: natural resource::in ground
     amount: 0.9632
-    unit: kilogram
\ No newline at end of file
+    unit: kilogram
+
+- name: primary zinc production from concentrate
+  reference product: cadmium
+  unit: kilogram
+  additional flow:
+    name: Cadmium
+    categories: natural resource::in ground
+    amount: 1.0
+    unit: kilogram
+
+- name: cadmium production, primary
+  reference product: cadmium
+  unit: kilogram
+  additional flow:
+    name: Cadmium
+    categories: natural resource::in ground
+    amount: 1.0
+    unit: kilogram
+
+
+- name: rare earth oxides production, from rare earth carbonate concentrate
+  reference product: cerium oxide
+  unit: kilogram
+  additional flow:
+    name: Cerium
+    categories: natural resource::in ground
+    amount: 0.814
+    unit: kilogram
+
+- name: rare earth oxides production, from rare earth carbonate concentrate
+  reference product: dysprosium oxide
+  unit: kilogram
+  additional flow:
+    name: Dysprosium
+    categories: natural resource::in ground
+    amount: 0.871
+    unit: kilogram
+
+- name: rare earth oxides production, from rare earth carbonate concentrate
+  reference product: erbium oxide
+  unit: kilogram
+  additional flow:
+    name: Erbium
+    categories: natural resource::in ground
+    amount: 0.875
+    unit: kilogram
+
+- name: rare earth oxides production, from rare earth carbonate concentrate
+  reference product: europium oxide
+  unit: kilogram
+  additional flow:
+    name: Europium
+    categories: natural resource::in ground
+    amount: 0.864
+    unit: kilogram
+
+- name: rare earth oxides production, from rare earth carbonate concentrate
+  reference product: gadolinium oxide
+  unit: kilogram
+  additional flow:
+    name: Gadolinium
+    categories: natural resource::in ground
+    amount: 0.868
+    unit: kilogram
+
+- name: graphite ore mining
+  reference product: graphite ore, mined
+  unit: kilogram
+  additional flow:
+    name: Graphite
+    categories: natural resource::in ground
+    amount: 1.0
+    unit: kilogram
+
+- name: indium production
+  reference product: indium
+  unit: kilogram
+  additional flow:
+    name: Indium
+    categories: natural resource::in ground
+    amount: 1.0
+    unit: kilogram
+
+- name: rare earth oxides production, from rare earth carbonate concentrate
+  reference product: lanthanum oxide
+  unit: kilogram
+  additional flow:
+    name: Lanthanum
+    categories: natural resource::in ground
+    amount: 0.853
+    unit: kilogram
+
+- name: lithium carbonate production, from spodumene
+  reference product: lithium carbonate
+  unit: kilogram
+  additional flow:
+    name: Lithium
+    categories: natural resource::in ground
+    amount: 0.188
+    unit: kilogram
+
+- name: lithium carbonate production, from concentrated brine
+  reference product: lithium carbonate
+  unit: kilogram
+  additional flow:
+    name: Lithium
+    categories: natural resource::in ground
+    amount: 0.188
+    unit: kilogram
+
+- name: magnesium production, electrolysis
+  reference product: magnesium
+  unit: kilogram
+  additional flow:
+    name: Magnesium
+    categories: natural resource::in ground
+    amount: 1.0
+    unit: kilogram
+
+- name: rare earth oxides production, from rare earth carbonate concentrate
+  reference product: neodymium oxide
+  unit: kilogram
+  additional flow:
+    name: Neodymium
+    categories: natural resource::in ground
+    amount: 0.857
+    unit: kilogram
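Each corrections.yaml entry above names an ecoinvent activity (optionally pinned to a location) and the natural-resource flow that must be added back after allocation. A minimal sketch of how one entry translates into the biosphere exchange that post_allocation_correction() below appends; the helper name build_correction_exchange and the flow_codes argument are illustrative, not part of premise:

    # Illustrative entry, mirroring the YAML structure above.
    correction = {
        "name": "cadmium production, primary",
        "reference product": "cadmium",
        "unit": "kilogram",
        "additional flow": {
            "name": "Cadmium",
            "categories": "natural resource::in ground",
            "amount": 1.0,
            "unit": "kilogram",
        },
    }

    def build_correction_exchange(entry: dict, flow_codes: dict) -> dict:
        # Hypothetical helper: turn an entry into the biosphere exchange to append.
        flow = entry["additional flow"]
        main_cat, sub_cat = flow["categories"].split("::")
        return {
            "name": flow["name"],
            "amount": flow["amount"],
            "unit": flow["unit"],
            "type": "biosphere",
            "categories": (main_cat, sub_cat),
            # flow_codes maps (name, category, sub-category, unit) to a biosphere3 code.
            "input": ("biosphere3", flow_codes[(flow["name"], main_cat, sub_cat, flow["unit"])]),
        }

An entry without a location key, like the ones added here, applies to every location of that activity, which is why the lookup in metals.py switches from ws.get_one to ws.get_many.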
diff --git a/premise/metals.py b/premise/metals.py
index dd52eab0..4f3ff100 100644
--- a/premise/metals.py
+++ b/premise/metals.py
@@ -453,42 +453,48 @@ def post_allocation_correction(self):
         factors_list = load_post_allocation_correction_factors()
 
         for dataset in factors_list:
-            ds = ws.get_one(
-                self.database,
+            filters = [
                 ws.equals("name", dataset["name"]),
                 ws.equals("reference product", dataset["reference product"]),
-                ws.equals("location", dataset["location"]),
                 ws.equals("unit", dataset["unit"]),
-            )
-            ds["exchanges"].append(
-                {
-                    "name": dataset["additional flow"]["name"],
-                    "amount": dataset["additional flow"]["amount"],
-                    "unit": dataset["additional flow"]["unit"],
-                    "type": "biosphere",
-                    "categories": tuple(
-                        dataset["additional flow"]["categories"].split("::")
-                    ),
-                    "input": (
-                        "biosphere3",
-                        self.biosphere_flow_codes[
-                            dataset["additional flow"]["name"],
-                            dataset["additional flow"]["categories"].split("::")[0],
-                            dataset["additional flow"]["categories"].split("::")[1],
-                            dataset["additional flow"]["unit"],
-                        ],
-                    ),
-                }
-            )
+            ]
 
-            if "log parameters" not in ds:
-                ds["log parameters"] = {}
+            if "location" in dataset:
+                filters.append(ws.equals("location", dataset["location"]))
 
-            ds["log parameters"]["post-allocation correction"] = dataset[
-                "additional flow"
-            ]["amount"]
+            for ds in ws.get_many(
+                self.database,
+                *filters,
+            ):
+                ds["exchanges"].append(
+                    {
+                        "name": dataset["additional flow"]["name"],
+                        "amount": dataset["additional flow"]["amount"],
+                        "unit": dataset["additional flow"]["unit"],
+                        "type": "biosphere",
+                        "categories": tuple(
+                            dataset["additional flow"]["categories"].split("::")
+                        ),
+                        "input": (
+                            "biosphere3",
+                            self.biosphere_flow_codes[
+                                dataset["additional flow"]["name"],
+                                dataset["additional flow"]["categories"].split("::")[0],
+                                dataset["additional flow"]["categories"].split("::")[1],
+                                dataset["additional flow"]["unit"],
+                            ],
+                        ),
+                    }
+                )
+
+                if "log parameters" not in ds:
+                    ds["log parameters"] = {}
 
-            self.write_log(ds, "updated")
+                ds["log parameters"]["post-allocation correction"] = dataset[
+                    "additional flow"
+                ]["amount"]
+
+                self.write_log(ds, "updated")
 
     def create_new_mining_activity(
         self,
@@ -496,6 +502,7 @@ def create_new_mining_activity(
         reference_product: str,
         new_locations: dict,
         geography_mapping=None,
+        shares: dict = None,
     ) -> dict:
         """
         Create a new mining activity in a new location.
@@ -510,14 +517,14 @@ def create_new_mining_activity(
             ]
         }
 
-        geography_mapping = {k: v for k, v in geography_mapping.items() if k != v}
-
         # Get the original datasets
         datasets = self.fetch_proxies(
             name=name,
             ref_prod=reference_product,
             regions=new_locations.values(),
             geo_mapping=geography_mapping,
+            production_variable=shares,
+            exact_product_match=True,
         )
 
         return datasets
@@ -573,9 +580,6 @@ def get_shares(self, df: pd.DataFrame, new_locations: dict, name, ref_prod) -> d
                 share = share.values[0]
             shares[(name, ref_prod, short_location)] = share
 
-        # normalize shares to 1
-        shares = {k: v / sum(shares.values()) for k, v in shares.items()}
-
         return shares
 
     def get_geo_mapping(self, df: pd.DataFrame, new_locations: dict) -> dict:
@@ -612,7 +616,11 @@ def create_region_specific_markets(self, df: pd.DataFrame) -> List[dict]:
 
             # if not, we create it
             datasets = self.create_new_mining_activity(
-                name, ref_prod, new_locations, geography_mapping
+                name,
+                ref_prod,
+                new_locations,
+                geography_mapping,
+                {k[2]: v for k, v in shares.items()},
             )
 
             # add new datasets to database
@@ -638,17 +646,25 @@ def create_region_specific_markets(self, df: pd.DataFrame) -> List[dict]:
             new_exchanges.extend(
                 [
                     {
-                        "name": dataset["name"],
-                        "product": dataset["reference product"],
-                        "location": dataset["location"],
-                        "unit": dataset["unit"],
-                        "amount": shares[(name, ref_prod, dataset["location"])],
+                        "name": k[0],
+                        "product": k[1],
+                        "location": k[2],
+                        "unit": "kilogram",
+                        "amount": share,
                         "type": "technosphere",
                     }
-                    for dataset in datasets.values()
+                    for k, share in shares.items()
                 ]
            )
 
+        # normalize amounts to 1
+        total = sum([exc["amount"] for exc in new_exchanges])
+        new_exchanges = [
+            {k: v for k, v in exc.items() if k != "amount"}
+            | {"amount": exc["amount"] / total}
+            for exc in new_exchanges
+        ]
+
         return new_exchanges
 
     def create_market(self, metal, df):
@@ -691,6 +707,7 @@ def create_market(self, metal, df):
         # add mining exchanges
         dataset["exchanges"].extend(self.create_region_specific_markets(df))
+
         # add transport exchanges
         trspt_exc = self.add_transport_to_market(dataset, metal)
 
         if len(trspt_exc) > 0:
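With the normalisation removed from get_shares(), create_region_specific_markets() now builds one technosphere exchange per (name, reference product, location) key in shares and rescales the amounts to sum to one afterwards. A small self-contained sketch of that step, with made-up activity names and share values; the dict-union `|` used in the patch requires Python 3.9+, and the {**exc, ...} form shown here is equivalent:

    # Hypothetical shares keyed by (activity name, reference product, location).
    shares = {
        ("mine operation", "metal concentrate", "CD"): 0.6,
        ("mine operation", "metal concentrate", "AU"): 0.2,
    }

    exchanges = [
        {
            "name": name,
            "product": product,
            "location": location,
            "unit": "kilogram",
            "amount": share,
            "type": "technosphere",
        }
        for (name, product, location), share in shares.items()
    ]

    # Rescale so the market exchanges sum to 1, as done at the end of
    # create_region_specific_markets().
    total = sum(exc["amount"] for exc in exchanges)
    exchanges = [{**exc, "amount": exc["amount"] / total} for exc in exchanges]

    assert abs(sum(exc["amount"] for exc in exchanges) - 1.0) < 1e-9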
diff --git a/premise/transformation.py b/premise/transformation.py
index 57a15fe3..8333272b 100644
--- a/premise/transformation.py
+++ b/premise/transformation.py
@@ -597,7 +597,8 @@ def fetch_proxies(
         geo_mapping: dict = None,
         delete_original_dataset=False,
         empty_original_activity=True,
-        exact_match=True,
+        exact_name_match=True,
+        exact_product_match=False,
     ) -> Dict[str, dict]:
         """
         Fetch dataset proxies, given a dataset `name` and `reference product`.
@@ -627,16 +628,19 @@ def fetch_proxies(
         for region in d_iam_to_eco:
             # build filters
-            if exact_match is True:
+            if exact_name_match is True:
                 filters = [
                     ws.equals("name", name),
-                    ws.equals("reference product", ref_prod),
                 ]
             else:
                 filters = [
-                    ws.equals("name", name),
-                    ws.contains("reference product", ref_prod),
+                    ws.contains("name", name),
                 ]
+            if exact_product_match is True:
+                filters.append(ws.equals("reference product", ref_prod))
+            else:
+                filters.append(ws.contains("reference product", ref_prod))
+
             filters.append(ws.equals("location", d_iam_to_eco[region]))
 
             try:
@@ -675,19 +679,22 @@ def fetch_proxies(
                         production_variable,
                     ]
 
-                if all(
-                    i in self.iam_data.production_volumes.variables
-                    for i in production_variable
-                ):
-                    prod_vol = (
-                        self.iam_data.production_volumes.sel(
-                            region=region, variables=production_variable
+                if isinstance(production_variable, list):
+                    if all(
+                        i in self.iam_data.production_volumes.variables
+                        for i in production_variable
+                    ):
+                        prod_vol = (
+                            self.iam_data.production_volumes.sel(
+                                region=region, variables=production_variable
+                            )
+                            .interp(year=self.year)
+                            .sum(dim="variables")
+                            .values.item(0)
                         )
-                        .interp(year=self.year)
-                        .sum(dim="variables")
-                        .values.item(0)
-                    )
+                elif isinstance(production_variable, dict):
+                    prod_vol = production_variable[region]
                 else:
                     prod_vol = 1
             else:
@@ -744,7 +751,7 @@ def empty_original_datasets(
         self,
         name: str,
         ref_prod: str,
-        production_variable: str,
+        production_variable: [str, dict],
         loc_map: Dict[str, str],
         regions: List[str] = None,
     ) -> None:
@@ -769,7 +776,7 @@ def empty_original_datasets(
         existing_datasets = ws.get_many(
             self.database,
             ws.equals("name", name),
-            ws.contains("reference product", ref_prod),
+            ws.equals("reference product", ref_prod),
             ws.exclude(
                 ws.either(*[ws.equals("location", loc) for loc in self.regions])
             ),
         )
@@ -777,13 +784,13 @@ def empty_original_datasets(
 
         for existing_ds in existing_datasets:
             if existing_ds["location"] in mapping:
-                iam_locs = list(mapping[existing_ds["location"]])
+                locations = list(mapping[existing_ds["location"]])
             else:
-                iam_locs = [self.ecoinvent_to_iam_loc[existing_ds["location"]]]
-                iam_locs = [loc for loc in iam_locs if loc in regions]
+                locations = [self.ecoinvent_to_iam_loc[existing_ds["location"]]]
+                locations = [loc for loc in locations if loc in regions]
 
-            if iam_locs == ["World"]:
-                iam_locs = [r for r in regions if r != "World"]
+            if locations == ["World"]:
+                locations = [r for r in regions if r != "World"]
 
             # add tag
             existing_ds["has_downstream_consumer"] = False
@@ -803,7 +810,7 @@ def empty_original_datasets(
 
             _ = lambda x: x if x != 0.0 else 1.0
 
-            if len(iam_locs) == 1:
+            if len(locations) == 1:
                 existing_ds["exchanges"].append(
                     {
                         "name": existing_ds["name"],
@@ -811,34 +818,31 @@ def empty_original_datasets(
                         "amount": 1.0,
                         "unit": existing_ds["unit"],
                         "uncertainty type": 0,
-                        "location": iam_locs[0],
+                        "location": locations[0],
                         "type": "technosphere",
                     }
                 )
-            else:
-                for iam_loc in iam_locs:
-                    if production_variable and all(
-                        i in self.iam_data.production_volumes.variables.values.tolist()
-                        for i in production_variable
-                    ):
-                        share = (
-                            self.iam_data.production_volumes.sel(
-                                region=iam_loc, variables=production_variable
-                            )
-                            .interp(year=self.year)
-                            .sum(dim="variables")
-                            .values.item(0)
-                        ) / _(
-                            self.iam_data.production_volumes.sel(
-                                region=iam_locs, variables=production_variable
-                            )
-                            .interp(year=self.year)
-                            .sum(dim=["variables", "region"])
-                            .values.item(0)
-                        )
-                    else:
-                        share = 1 / len(iam_locs)
+            elif isinstance(production_variable, str) and all(
+                i in self.iam_data.production_volumes.variables.values.tolist()
+                for i in production_variable
+            ):
+                for location in locations:
+                    share = (
+                        self.iam_data.production_volumes.sel(
+                            region=location, variables=production_variable
+                        )
+                        .interp(year=self.year)
+                        .sum(dim="variables")
+                        .values.item(0)
+                    ) / _(
+                        self.iam_data.production_volumes.sel(
+                            region=locations, variables=production_variable
+                        )
+                        .interp(year=self.year)
+                        .sum(dim=["variables", "region"])
+                        .values.item(0)
+                    )
 
                     if share > 0:
                         existing_ds["exchanges"].append(
@@ -848,11 +852,46 @@ def empty_original_datasets(
                                 "amount": share,
                                 "unit": existing_ds["unit"],
                                 "uncertainty type": 0,
-                                "location": iam_loc,
+                                "location": location,
                                 "type": "technosphere",
                             }
                         )
 
+            elif isinstance(production_variable, dict):
+                existing_ds["exchanges"].extend(
+                    [
+                        {
+                            "name": existing_ds["name"],
+                            "product": existing_ds["reference product"],
+                            "amount": share,
+                            "unit": existing_ds["unit"],
+                            "uncertainty type": 0,
+                            "location": loc,
+                            "type": "technosphere",
+                        }
+                        for loc, share in production_variable.items()
+                    ]
+                )
+
+            else:
+                share = 1 / len(locations)
+
+                if share > 0:
+                    existing_ds["exchanges"].extend(
+                        [
+                            {
+                                "name": existing_ds["name"],
+                                "product": existing_ds["reference product"],
+                                "amount": share,
+                                "unit": existing_ds["unit"],
+                                "uncertainty type": 0,
+                                "location": location,
+                                "type": "technosphere",
+                            }
+                            for location in locations
+                        ]
+                    )
+
             # add dataset to emptied datasets list
             self.modified_datasets[(self.model, self.scenario, self.year)][
                 "emptied"
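fetch_proxies() and empty_original_datasets() now accept production_variable as a string, a list of IAM variables, or a dict of region-to-share values; in the dict case the emptied dataset simply redistributes its reference flow over the regional copies using those shares. A minimal sketch of that dict branch, with a hypothetical dataset and share values:

    # Hypothetical region -> share mapping, e.g. as built in metals.py and passed
    # through create_new_mining_activity(..., shares={...}).
    production_variable = {"CAZ": 0.25, "EUR": 0.40, "CHA": 0.35}

    existing_ds = {
        "name": "market for lithium carbonate",      # illustrative dataset
        "reference product": "lithium carbonate",
        "unit": "kilogram",
        "exchanges": [],
    }

    # Redistribute the emptied dataset over the regional datasets.
    existing_ds["exchanges"].extend(
        {
            "name": existing_ds["name"],
            "product": existing_ds["reference product"],
            "amount": share,
            "unit": existing_ds["unit"],
            "uncertainty type": 0,
            "location": loc,
            "type": "technosphere",
        }
        for loc, share in production_variable.items()
    )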