From 2998266da52bcee207275151bf8bad6cde61f7b5 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Felix=20Kr=C3=B6ber?=
Date: Fri, 26 Jan 2024 23:23:58 +0100
Subject: [PATCH 1/2] feat: Preview mode & Caching :gift:

---
 semantique/processor/core.py | 124 ++++++++++++++++++++++++++++++++---
 semantique/recipe.py         |  26 +++++++-
 2 files changed, 137 insertions(+), 13 deletions(-)

diff --git a/semantique/processor/core.py b/semantique/processor/core.py
index 329151e6..af19f165 100644
--- a/semantique/processor/core.py
+++ b/semantique/processor/core.py
@@ -1,6 +1,5 @@
 import geopandas as gpd
 import numpy as np
-
 import copy
 import inspect
 import logging
@@ -45,12 +44,16 @@ class QueryProcessor():
     when applying processes, and promote them if necessary? Keeping track
     of value types also means throwing errors whenever a value type is not
     supported by a specific process.
-
+  preview : :obj:`bool`
+    Run the query processor at a reduced spatial resolution to test the recipe execution.
+    A preview run is required before the cache can be used.
+  cache : :obj:`Cache`
+    The cache object that is used to store data layers.
 
   """
 
   def __init__(self, recipe, datacube, mapping, extent, custom_verbs = None,
               custom_operators = None, custom_reducers = None,
-              track_types = True):
+              track_types = True, preview = False, cache = None):
     self._eval_obj = [None]
     self._response = {}
     self.recipe = recipe
@@ -61,6 +64,11 @@ def __init__(self, recipe, datacube, mapping, extent, custom_verbs = None,
     self.custom_verbs = custom_verbs
     self.custom_operators = custom_operators
     self.custom_reducers = custom_reducers
+    self.preview = preview
+    if cache is None:
+      self.cache = Cache()
+    else:
+      self.cache = cache
 
   @property
   def response(self):
@@ -161,6 +169,24 @@ def track_types(self):
   @track_types.setter
   def track_types(self, value):
     self._track_types = value
+
+  @property
+  def cache(self):
+    """:obj:`Cache`: Cache of data layers for the query execution."""
+    return self._cache
+
+  @cache.setter
+  def cache(self, value):
+    self._cache = value
+
+  @property
+  def preview(self):
+    """:obj:`bool`: Is the query being processed in preview mode."""
+    return self._preview
+
+  @preview.setter
+  def preview(self, value):
+    self._preview = value
 
   @classmethod
   def parse(cls, recipe, datacube, mapping, space, time, spatial_resolution,
             crs = None, tz = None, **config):
@@ -222,9 +248,17 @@ def parse(cls, recipe, datacube, mapping, space, time,
       mapping. Such functionality is not implemented yet.
 
     """
-    logger.info("Started parsing the semantic query")
+    # Step 0: Derive a coarse spatial resolution for preview runs.
+    if config.get("preview"):
+      logger.info("--- Preview mode (reduced resolution) ---")
+      output_shape = (5, 5)
+      bounds = space.features.to_crs(crs).total_bounds
+      x_res = (bounds[2] - bounds[0]) / output_shape[1]
+      y_res = (bounds[3] - bounds[1]) / output_shape[0]
+      spatial_resolution = [-y_res, x_res]
     # Step I: Parse the spatio-temporal extent.
     # It needs to be stored as a 2D array with dimensions space and time.
+    logger.info("Started parsing the semantic query")
     extent = utils.parse_extent(
       spatial_extent = space,
       temporal_extent = time,
@@ -357,6 +391,8 @@ def handle_concept(self, block):
       extent = self._extent,
       datacube = self._datacube,
       eval_obj = self._get_eval_obj(),
+      preview = self._preview,
+      cache = self._cache,
       custom_verbs = self._custom_verbs,
       custom_operators = self._custom_operators,
       custom_reducers = self._custom_reducers,
@@ -378,12 +414,26 @@ def handle_layer(self, block):
       :obj:`xarray.DataArray`
 
     """
-    logger.debug(f"Retrieving layer {block['reference']}")
-    out = self._datacube.retrieve(
-      *block["reference"],
-      extent = self._extent
-    )
+    # Retrieve the data, from the cache if it already holds the layer.
+    layer_key = "_".join(block["reference"])
+    if layer_key in self._cache.data:
+      logger.debug(f"Retrieving layer (from cache) {block['reference']}")
+      out = self._cache.load(layer_key)
+    else:
+      logger.debug(f"Retrieving layer (from src) {block['reference']}")
+      out = self._datacube.retrieve(
+        *block["reference"],
+        extent = self._extent
+      )
     logger.debug(f"Retrieved layer {block['reference']}:\n{out}")
+    # Update the cache.
+    if self._preview:
+      self._cache.build(block['reference'])
+    else:
+      self._cache.update(layer_key, out)
+      logger.debug("Cache updated")
+      logger.debug(f"Sequence of layers: {self._cache._seq}")
+      logger.debug(f"Currently cached layers: {list(self._cache._data.keys())}")
     return out
 
   def handle_result(self, block):
@@ -1153,4 +1203,58 @@ def _reset_eval_obj(self):
     del self._eval_obj[-1]
 
   def _set_eval_obj(self, obj):
-    self._eval_obj.append(obj)
\ No newline at end of file
+    self._eval_obj.append(obj)
+
+class Cache:
+  """Cache that keeps track of the data references in their order of
+  evaluation and retains data layers in RAM as long as they are still
+  needed for the further execution of the semantic query.
+  """
+  def __init__(self):
+    self._seq = []
+    self._data = {}
+
+  @property
+  def seq(self):
+    """list: Sequence of data references."""
+    return self._seq
+
+  @property
+  def data(self):
+    """dict: Data layers stored in the cache."""
+    return self._data
+
+  def build(self, ref):
+    """Build up the sequence of data references."""
+    self._add_to_seq(ref)
+
+  def load(self, key):
+    """Load a data layer from the cache."""
+    return self._data.get(key, None)
+
+  def update(self, key, data):
+    """Update the cache, retaining a layer only if it is needed again."""
+    if len(self._seq):
+      current = self._seq[0]
+      self._rm_from_seq(0)
+      if current in self._seq:
+        self._add_data(key, data)
+      else:
+        if key in self._data:
+          self._rm_data(key)
+
+  def _add_to_seq(self, ref):
+    """Append a data reference to the sequence."""
+    self._seq.append(ref)
+
+  def _rm_from_seq(self, idx):
+    """Remove a data reference from the sequence."""
+    del self._seq[idx]
+
+  def _add_data(self, key, value):
+    """Add a data layer to the cache."""
+    self._data[key] = value
+
+  def _rm_data(self, key):
+    """Remove a data layer from the cache."""
+    del self._data[key]
\ No newline at end of file
diff --git a/semantique/recipe.py b/semantique/recipe.py
index 8d37aa9e..76569e52 100644
--- a/semantique/recipe.py
+++ b/semantique/recipe.py
@@ -27,7 +27,7 @@ def __init__(self, results = None):
     obj = {} if results is None else results
     super(QueryRecipe, self).__init__(obj)
 
-  def execute(self, datacube, mapping, space, time, **config):
+  def execute(self, datacube, mapping, space, time, caching = False, **config):
     """Execute a query recipe.
     This function initializes a :obj:`processor.core.QueryProcessor` instance
@@ -44,6 +44,11 @@ def execute(self, datacube, mapping, space, time, **config):
         The spatial extent in which the query should be processed.
       time : TemporalExtent
         The temporal extent in which the query should be processed.
+      caching : :obj:`bool`
+        Should the query processor cache the data layers referenced via the
+        mapped concepts? Enabling caching increases the memory footprint while
+        reducing the I/O time needed to read data. It is only beneficial if
+        the query recipe references the same data layer multiple times.
       **config:
         Additional configuration parameters forwarded to
        :func:`QueryProcessor.parse`.
@@ -72,5 +77,20 @@ def execute(self, datacube, mapping, space, time, **config):
     >>> recipe.execute(dc, mapping, space, time, **config)
 
     """
-    qp = QueryProcessor.parse(self, datacube, mapping, space, time, **config)
-    return qp.optimize().execute()
\ No newline at end of file
+    if caching:
+      # preview run to set up the cache (copy config to avoid mutating the caller's dict)
+      preview_config = dict(config)
+      preview_config["preview"] = True
+      preview_config["cache"] = None
+      qp = QueryProcessor.parse(self, datacube, mapping, space, time, **preview_config)
+      _ = qp.optimize().execute()
+      # main run, reusing the cache of the preview run
+      main_config = dict(config)
+      main_config["preview"] = False
+      main_config["cache"] = qp.cache
+      qp = QueryProcessor.parse(self, datacube, mapping, space, time, **main_config)
+      return qp.optimize().execute()
+    else:
+      # execute the query recipe directly
+      qp = QueryProcessor.parse(self, datacube, mapping, space, time, **config)
+      return qp.optimize().execute()
\ No newline at end of file

From ce2560df916e26522f38698ed8a0dabe14b56ee2 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Felix=20Kr=C3=B6ber?=
Date: Fri, 26 Jan 2024 23:43:20 +0100
Subject: [PATCH 2/2] docs: Preliminary demo of caching :books:

---
 demo/cache_tests.ipynb | 546 +++++++++++++++++++++++++++++++++++++++++
 1 file changed, 546 insertions(+)
 create mode 100644 demo/cache_tests.ipynb

diff --git a/demo/cache_tests.ipynb b/demo/cache_tests.ipynb
new file mode 100644
index 00000000..3d94a747
--- /dev/null
+++ b/demo/cache_tests.ipynb
@@ -0,0 +1,546 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Setup"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "%load_ext autoreload\n",
+    "%autoreload 2"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 2,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "C:\\Users\\felix\\AppData\\Roaming\\Python\\Python310\\site-packages\\geopandas\\_compat.py:124: UserWarning: The Shapely GEOS version (3.11.2-CAPI-1.17.2) is incompatible with the GEOS version PyGEOS was compiled with (3.10.1-CAPI-1.16.0). Conversions between both will be slow.\n",
+      "  warnings.warn(\n",
+      "C:\\Users\\felix\\AppData\\Local\\Temp/ipykernel_7388/2560623581.py:1: DeprecationWarning: Shapely 2.0 is installed, but because PyGEOS is also installed, GeoPandas still uses PyGEOS by default. However, starting with version 0.14, the default will switch to Shapely. To force to use Shapely 2.0 now, you can either uninstall PyGEOS or set the environment variable USE_PYGEOS=0. You can do this before starting the Python process, or in your code before importing geopandas:\n",
+      "\n",
+      "import os\n",
+      "os.environ['USE_PYGEOS'] = '0'\n",
+      "import geopandas\n",
+      "\n",
+      "In the next release, GeoPandas will switch to using Shapely by default, even if PyGEOS is installed. If you only have PyGEOS installed to get speed-ups, this switch should be smooth. However, if you are using PyGEOS directly (calling PyGEOS functions on geometries from GeoPandas), this will then stop working and you are encouraged to migrate from PyGEOS to Shapely 2.0 (https://shapely.readthedocs.io/en/latest/migration_pygeos.html).\n",
+      "  import geopandas as gpd\n"
+     ]
+    }
+   ],
+   "source": [
+    "import geopandas as gpd\n",
+    "import json\n",
+    "import matplotlib.pyplot as plt\n",
+    "import numpy as np\n",
+    "import semantique as sq\n",
+    "import xarray as xr"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Load a mapping.\n",
+    "with open(\"files/mapping.json\", \"r\") as file:\n",
+    "    mapping = sq.mapping.Semantique(json.load(file))\n",
+    "\n",
+    "# Represent an EO data cube.\n",
+    "with open(\"files/layout.json\", \"r\") as file:\n",
+    "    dc = sq.datacube.GeotiffArchive(json.load(file), src = \"files/layers.zip\")\n",
+    "\n",
+    "# Set the spatio-temporal extent.\n",
+    "space = sq.SpatialExtent(gpd.read_file(\"files/footprint.geojson\"))\n",
+    "time = sq.TemporalExtent(\"2019-01-01\", \"2020-12-31\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## How the cache works"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "RAM requirements are proportional to the number of data layers that are stored as intermediate results. Caching data layers in RAM should therefore only be done for those that are needed again when evaluating downstream parts of the recipe. This requires foresight about the evaluation order of the recipe, which in turn requires a preview run preceding the actual evaluation. The preview run is performed by loading the data at a drastically reduced spatial resolution (a 5x5 pixel grid). It resolves the data references and fills the cache by creating a list of these references in the order in which they are evaluated. During the actual evaluation of the recipe, this list is then used dynamically as a basis for deciding which data layers to keep in the cache and for reading them from there if they are needed again.\n",
+    "\n",
+    "Below, the result of the preview run is shown first to demonstrate what the resolved data references look like. In a second step, the resulting initialised cache can then be fed as a context element to the QueryProcessor for the actual recipe execution.",
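+    "\n",
+    "To make this bookkeeping concrete, the toy example below exercises the `Cache` class added in this PR directly (a minimal sketch; plain strings stand in for the reference tuples that the query processor actually records):\n",
+    "\n",
+    "```python\n",
+    "from semantique.processor.core import Cache\n",
+    "\n",
+    "cache = Cache()\n",
+    "\n",
+    "# Preview run: record the order in which the data references are evaluated.\n",
+    "for ref in [\"band04\", \"band03\", \"cloud\", \"band04\"]:\n",
+    "    cache.build(ref)\n",
+    "cache.seq  # ['band04', 'band03', 'cloud', 'band04']\n",
+    "\n",
+    "# Actual run: update() pops the front of the sequence and retains a layer\n",
+    "# only if the same reference occurs again later in the sequence.\n",
+    "cache.update(\"band04\", \"<layer>\")\n",
+    "list(cache.data)  # ['band04'] -> needed again, so kept in RAM\n",
+    "cache.update(\"band03\", \"<layer>\")\n",
+    "list(cache.data)  # ['band04'] -> 'band03' is not needed again\n",
+    "```"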
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 4,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from semantique.processor.core import QueryProcessor\n",
+    "\n",
+    "# define a simple recipe for a cloudfree composite\n",
+    "recipe = sq.QueryRecipe()\n",
+    "red_band = sq.reflectance(\"s2_band04\")\n",
+    "green_band = sq.reflectance(\"s2_band03\")\n",
+    "blue_band = sq.reflectance(\"s2_band02\")\n",
+    "recipe[\"composite\"] = sq.collection(red_band, green_band, blue_band).\\\n",
+    "    filter(sq.entity(\"cloud\").evaluate(\"not\")).\\\n",
+    "    reduce(\"median\", \"time\").\\\n",
+    "    concatenate(\"band\")\n",
+    "\n",
+    "# define context\n",
+    "context = {\n",
+    "    \"datacube\": dc,\n",
+    "    \"mapping\": mapping,\n",
+    "    \"space\": space,\n",
+    "    \"time\": time,\n",
+    "    \"crs\": 3035,\n",
+    "    \"tz\": \"UTC\",\n",
+    "    \"spatial_resolution\": [-10, 10],\n",
+    "}"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 5,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "[('reflectance', 's2_band04'),\n",
+       " ('reflectance', 's2_band03'),\n",
+       " ('reflectance', 's2_band02'),\n",
+       " ['atmosphere', 'colortype']]"
+      ]
+     },
+     "execution_count": 5,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "# step I: preview run\n",
+    "qp = QueryProcessor.parse(recipe, **{**context, \"preview\": True})\n",
+    "qp.optimize().execute()\n",
+    "qp.cache.seq"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 6,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "(3, 563, 576)"
+      ]
+     },
+     "execution_count": 6,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "# step II: query processor execution\n",
+    "qp = QueryProcessor.parse(recipe, **{**context, \"cache\": qp.cache})\n",
+    "result = qp.optimize().execute()\n",
+    "result[\"composite\"].shape"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "As you can see, the preview run resolves the references to the data layers by looking up the entities' references in the mapping.json. Note that in the current case the result is not that interesting, though: four different data layers are referenced, each of them only once, so there is nothing to be cached during recipe execution. The QueryProcessor will accordingly load all data layers from the referenced sources without storing any of them in the cache.\n",
+    "\n",
+    "As a user, however, you can directly initiate the entire caching workflow (preview & full-resolution recipe execution) by setting the corresponding context parameter, i.e. by calling `recipe.execute(..., caching=True)`."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 7,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# same as above in a single step\n",
+    "result = recipe.execute(**{**context, \"caching\": True})"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Assessment of cache performance"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Now let's analyse some timing differences in executing a recipe with and without caching. Most importantly, the timing difference depends on...\n",
+    "* the redundancy of the data references in the recipe, i.e. if layers are referenced multiple times, loading them from the cache reduces the overall time significantly\n",
+    "* the data source (EO data cube) from which they are loaded\n",
+    "\n",
+    "Especially for the latter it should be noted that this demo only analyses data loaded from a locally stored GeoTIFF (i.e. the GeotiffArchive layout). This is close to the worst case for demonstrating the benefits of caching, since the data is stored locally and is therefore quickly accessible. Also, GeoTIFFs that are not stored in a cloud-optimised format (COGs) require loading the whole dataset into memory even when running in preview mode, just to evaluate the sequence of data layers.\n",
+    "\n",
+    "Consequently, you will observe that in almost all of the following cases caching actually adds a small computational overhead. Keep in mind, however, that caching is designed for and particularly beneficial with STACCubes, where data is loaded over the internet."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 8,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# function to compare timing for a given recipe\n",
+    "def eval_timing(recipe, caching=False):\n",
+    "    context = {\n",
+    "        \"datacube\": dc,\n",
+    "        \"mapping\": mapping,\n",
+    "        \"space\": space,\n",
+    "        \"time\": time,\n",
+    "        \"crs\": 3035,\n",
+    "        \"tz\": \"UTC\",\n",
+    "        \"spatial_resolution\": [-10, 10],\n",
+    "        \"caching\": caching\n",
+    "    }\n",
+    "    res = recipe.execute(**context)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 9,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# recipe I\n",
+    "recipe_I = sq.QueryRecipe()\n",
+    "red_band = sq.reflectance(\"s2_band04\")\n",
+    "green_band = sq.reflectance(\"s2_band03\")\n",
+    "blue_band = sq.reflectance(\"s2_band02\")\n",
+    "recipe_I[\"composite\"] = sq.collection(red_band, green_band, blue_band).\\\n",
+    "    filter(sq.entity(\"cloud\").evaluate(\"not\")).\\\n",
+    "    reduce(\"median\", \"time\").\\\n",
+    "    concatenate(\"band\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 10,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "649 ms ± 14.7 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)\n"
+     ]
+    }
+   ],
+   "source": [
+    "%%timeit\n",
+    "# without caching\n",
+    "_ = eval_timing(recipe_I, False)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 11,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "998 ms ± 5.4 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)\n"
+     ]
+    }
+   ],
+   "source": [
+    "%%timeit\n",
+    "# with caching\n",
+    "_ = eval_timing(recipe_I, True)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 12,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# recipe II\n",
+    "recipe_II = sq.QueryRecipe()\n",
+    "recipe_II[\"dates\"] = sq.entity(\"vegetation\").\\\n",
+    "    filter(sq.self()).\\\n",
+    "    assign_time().\\\n",
+    "    reduce(\"first\", \"time\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 13,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "5.09 s ± 61.6 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)\n"
+     ]
+    }
+   ],
+   "source": [
+    "%%timeit\n",
+    "# without caching\n",
+    "_ = eval_timing(recipe_II, False)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 14,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "5.27 s ± 51.6 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)\n"
+     ]
+    }
+   ],
+   "source": [
+    "%%timeit\n",
+    "# with caching\n",
+    "_ = eval_timing(recipe_II, True)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 15,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# recipe III\n",
+    "recipe_III = sq.QueryRecipe()\n",
+    "recipe_III[\"water_count_time\"] = sq.entity(\"water\").reduce(\"count\", \"time\")\n",
+    "recipe_III[\"vegetation_count_time\"] = sq.entity(\"vegetation\").reduce(\"count\", \"time\")\n",
+    "recipe_III[\"water_count_space\"] = sq.entity(\"water\").reduce(\"count\", \"space\")\n",
+    "recipe_III[\"vegetation_count_space\"] = sq.entity(\"vegetation\").reduce(\"count\", \"space\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 16,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "499 ms ± 5.31 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)\n"
+     ]
+    }
+   ],
+   "source": [
+    "%%timeit\n",
+    "# without caching\n",
+    "_ = eval_timing(recipe_III, False)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 17,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "547 ms ± 4.4 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)\n"
+     ]
+    }
+   ],
+   "source": [
+    "%%timeit\n",
+    "# with caching\n",
+    "_ = eval_timing(recipe_III, True)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "More expressive examples using the STACCube are provided below. Note that they can't be executed for now, as the STACCube is currently still under development and not yet merged into the main branch. Whether caching brings significant advantages when loading data from a well-indexed OpenDataCube on quickly accessible hot storage also remains to be assessed.",
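+    "\n",
+    "Once executable, the timing comparison can be repeated for the STACCube by simply wrapping the execution calls below with a timer, for instance (a minimal sketch relying only on the standard library):\n",
+    "\n",
+    "```python\n",
+    "import time\n",
+    "\n",
+    "start = time.time()\n",
+    "response = recipe.execute(**context)\n",
+    "print(f\"elapsed: {time.time() - start:.1f} s\")\n",
+    "```"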
" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [], + "source": [ + "from pystac_client import Client\n", + "from shapely.geometry import box\n", + "from semantique.processor.core import QueryProcessor\n", + "import warnings\n", + "\n", + "# define temporal & spatial range to perform STAC query\n", + "xmin, ymin, xmax, ymax = 13.25,54.25,13.75,54.75\n", + "aoi = box(xmin, ymin, xmax, ymax)\n", + "t_range = [\"2020-07-15\", \"2020-09-01\"]\n", + "\n", + "# STAC-based metadata retrieval\n", + "import planetary_computer as pc\n", + "platform = \"Planet\"\n", + "catalog = Client.open(\n", + " \"https://planetarycomputer.microsoft.com/api/stac/v1\",\n", + " modifier=pc.sign_inplace,\n", + ")\n", + "query = catalog.search(\n", + " collections=\"sentinel-2-l2a\", \n", + " datetime=t_range, \n", + " limit=100, \n", + " intersects=aoi\n", + ")\n", + "item_coll = query.item_collection()\n", + "\n", + "# define datacube\n", + "with open(\"layout_planet.json\", \"r\") as file:\n", + " dc = sq.datacube.STACCube(\n", + " json.load(file), \n", + " src = item_coll,\n", + " dtype=\"int8\",\n", + " na_value=0,\n", + " )\n", + " \n", + "# define spatio-temporal context vars \n", + "res = 20\n", + "epsg = 3035\n", + "space = sq.SpatialExtent(gpd.GeoDataFrame(geometry=[aoi], crs = 4326))\n", + "time = sq.TemporalExtent(*t_range)\n", + "\n", + "# load mapping\n", + "with open(\"mapping.json\", \"r\") as file:\n", + " rules = json.load(file)\n", + "mapping = sq.mapping.Semantique(rules)\n", + "\n", + "# define recipe\n", + "recipe = sq.QueryRecipe()\n", + "recipe[\"green_map\"] = (\n", + " sq.entity(\"vegetation\")\n", + " .filter(sq.entity(\"cloud\").evaluate(\"not\"))\n", + " .reduce(\"percentage\", \"time\")\n", + ")\n", + "recipe[\"all_count\"] = (\n", + " sq.entity(\"all\")\n", + " .reduce(\"count\", \"time\")\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [], + "source": [ + "# normal execution (no caching/no preview)\n", + "context = {\n", + " \"datacube\": dc,\n", + " \"mapping\": mapping,\n", + " \"space\": space,\n", + " \"time\": time,\n", + " \"crs\": epsg,\n", + " \"tz\": \"UTC\",\n", + " \"spatial_resolution\": [-res, res]\n", + "}\n", + "\n", + "with warnings.catch_warnings():\n", + " warnings.simplefilter(\"ignore\", UserWarning)\n", + " response = recipe.execute(**context)" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": {}, + "outputs": [], + "source": [ + "# preview mode\n", + "context = {\n", + " \"datacube\": dc,\n", + " \"mapping\": mapping,\n", + " \"space\": space,\n", + " \"time\": time,\n", + " \"crs\": epsg,\n", + " \"tz\": \"UTC\",\n", + " \"spatial_resolution\": [-res, res],\n", + " \"preview\": True\n", + "}\n", + "\n", + "with warnings.catch_warnings():\n", + " warnings.simplefilter(\"ignore\", UserWarning)\n", + " response = recipe.execute(**context)" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": {}, + "outputs": [], + "source": [ + "# caching mode\n", + "context = {\n", + " \"datacube\": dc,\n", + " \"mapping\": mapping,\n", + " \"space\": space,\n", + " \"time\": time,\n", + " \"crs\": epsg,\n", + " \"tz\": \"UTC\",\n", + " \"spatial_resolution\": [-res, res],\n", + " \"caching\": True\n", + "}\n", + "\n", + "with warnings.catch_warnings():\n", + " warnings.simplefilter(\"ignore\", UserWarning)\n", + " response = recipe.execute(**context)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + 
"language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.1" + }, + "orig_nbformat": 4 + }, + "nbformat": 4, + "nbformat_minor": 2 +}