diff --git a/pr-previews/pr-42/benchmark_analysis/benchmark_analysis.py b/pr-previews/pr-42/benchmark_analysis/benchmark_analysis.py index fbc845d..ea10df9 100644 --- a/pr-previews/pr-42/benchmark_analysis/benchmark_analysis.py +++ b/pr-previews/pr-42/benchmark_analysis/benchmark_analysis.py @@ -2,19 +2,22 @@ import json from collections import defaultdict -from typing import Dict +from typing import Any, Dict, List import pandas as pd import plotly.express as px -def process_benchmark_data() -> Dict[str, pd.DataFrame]: +def process_benchmark_data() -> Dict[str, Dict[str, pd.DataFrame]]: """Read the benchmark json and convert into a dictionary of dataframes""" with open("../benchmark.json", "r") as f: benchmarks = json.load(f)["benchmarks"] - records = defaultdict(list) + records: Dict[str, Dict[str, List[Dict[str, Any]]]] = defaultdict( + lambda: defaultdict(list) + ) for bench in benchmarks: + dataset = bench["extra_info"].pop("concept_config_id") record = { "mean_time": bench["stats"]["mean"], "stddev": bench["stats"]["stddev"], @@ -22,23 +25,22 @@ def process_benchmark_data() -> Dict[str, pd.DataFrame]: **bench["params"], } record.update(bench["params"]) - records[bench["group"]].append(record) + records[bench["group"]][dataset].append(record) - dfs = { - group: pd.DataFrame(records).sort_values(by="bbox_size") - for group, records in records.items() - } + dfs: Dict[str, Dict[str, pd.DataFrame]] = defaultdict(dict) + for group, dataset_records in records.items(): + for dataset, _records in dataset_records.items(): + df = pd.DataFrame(_records).sort_values(by="bbox_size") - for group, df in dfs.items(): - bbox_dims = sorted( - df["bbox_dims"].unique(), key=lambda x: float(x.split("x")[0]) * -1 - ) + bbox_dims = sorted( + df["bbox_dims"].unique(), key=lambda x: float(x.split("x")[0]) * -1 + ) - df["bbox_dims"] = pd.Categorical( - df["bbox_dims"], categories=bbox_dims, ordered=True - ) + df["bbox_dims"] = pd.Categorical( + df["bbox_dims"], categories=bbox_dims, ordered=True + ) - dfs[group] = df + dfs[group][dataset] = df return dfs diff --git a/pr-previews/pr-42/benchmark_analysis/index.html b/pr-previews/pr-42/benchmark_analysis/index.html index 5bc7157..366586a 100644 --- a/pr-previews/pr-42/benchmark_analysis/index.html +++ b/pr-previews/pr-42/benchmark_analysis/index.html @@ -975,7 +975,7 @@

Benchmark analysis

-
+
-
-
@@ -2071,7 +2071,7 @@

Display tiles in an interactive map <meta name="viewport" content="width=device-width, initial-scale=1.0, maximum-scale=1.0, user-scalable=no" /> <style> - #map_dc6413e8f67d512590149c0be10ee73e { + #map_e595129d30af604fd498ce328de71abd { position: relative; width: 100.0%; height: 100.0%; @@ -2085,14 +2085,14 @@

Display tiles in an interactive map <body> - <div class="folium-map" id="map_dc6413e8f67d512590149c0be10ee73e" ></div> + <div class="folium-map" id="map_e595129d30af604fd498ce328de71abd" ></div> </body> <script> - var map_dc6413e8f67d512590149c0be10ee73e = L.map( - "map_dc6413e8f67d512590149c0be10ee73e", + var map_e595129d30af604fd498ce328de71abd = L.map( + "map_e595129d30af604fd498ce328de71abd", { center: [70.0, -40.0], crs: L.CRS.EPSG3857, @@ -2106,22 +2106,22 @@

Display tiles in an interactive map - var tile_layer_7f3e5da3047a39673e7316a6f9efb284 = L.tileLayer( + var tile_layer_9226113b48a1860a652ba0f1be6ec317 = L.tileLayer( "https://tile.openstreetmap.org/{z}/{x}/{y}.png", {"attribution": "\u0026copy; \u003ca href=\"https://www.openstreetmap.org/copyright\"\u003eOpenStreetMap\u003c/a\u003e contributors", "detectRetina": false, "maxNativeZoom": 19, "maxZoom": 19, "minZoom": 0, "noWrap": false, "opacity": 1, "subdomains": "abc", "tms": false} ); - tile_layer_7f3e5da3047a39673e7316a6f9efb284.addTo(map_dc6413e8f67d512590149c0be10ee73e); + tile_layer_9226113b48a1860a652ba0f1be6ec317.addTo(map_e595129d30af604fd498ce328de71abd); - var tile_layer_241e2c006a4d77fb19d899e4d8b9a5f1 = L.tileLayer( + var tile_layer_62f8b0fb6d70e633e068b20ad2c9535a = L.tileLayer( "https://dev-titiler-cmr.delta-backend.com/tiles/WebMercatorQuad/{z}/{x}/{y}@1x?concept_id=C1996881146-POCLOUD\u0026datetime=2024-10-10T00%3A00%3A00%2B00%3A00\u0026backend=xarray\u0026variable=sea_ice_fraction\u0026rescale=0%2C1\u0026colormap_name=blues_r", {"attribution": "NASA", "detectRetina": false, "maxZoom": 18, "minZoom": 0, "noWrap": false, "opacity": 1, "subdomains": "abc", "tms": false} ); - tile_layer_241e2c006a4d77fb19d899e4d8b9a5f1.addTo(map_dc6413e8f67d512590149c0be10ee73e); + tile_layer_62f8b0fb6d70e633e068b20ad2c9535a.addTo(map_e595129d30af604fd498ce328de71abd); </script> </html>' style="position:absolute;width:100%;height:100%;left:0;top:0;border:none !important;" webkitallowfullscreen=""> diff --git a/pr-previews/pr-42/search/search_index.json b/pr-previews/pr-42/search/search_index.json index 711298d..6c96670 100644 --- a/pr-previews/pr-42/search/search_index.json +++ b/pr-previews/pr-42/search/search_index.json @@ -1 +1 @@ -{"config":{"lang":["en"],"separator":"[\\s\\-]+","pipeline":["stopWordFilter"]},"docs":[{"location":"","title":"Home","text":"

A modern dynamic tile server with a NASA CMR backend built on top of FastAPI and Rasterio/GDAL.

"},{"location":"#titiler-cmr","title":"titiler-cmr","text":"

An API for creating image tiles from CMR queries.

"},{"location":"#features","title":"Features","text":"
  • Render tiles from assets discovered via queries to NASA's CMR
  • Uses the earthaccess python package to query the CMR
  • Built on top of titiler
  • Support for multiple projections (see TileMatrixSets) via morecantile.
  • JPEG / JP2 / PNG / WEBP / GTIFF / NumpyTile output format support
  • Automatic OpenAPI documentation (FastAPI builtin)
  • Example of AWS Lambda / ECS deployment (via CDK)
"},{"location":"#installation","title":"Installation","text":"

To install from source and run for development, install uv, then:

git clone https://github.com/developmentseed/titiler-cmr.git\ncd titiler-cmr\n\nuv sync --all-extras\n
"},{"location":"#authentication-for-data-read-access","title":"Authentication for data read access","text":"

titiler-cmr can read data either over HTTP (external) or directly from AWS S3 (direct) depending on the app configuration. The behavior of the application is controlled by the S3 authentication settings in settings.py, which you can set either with environment variables (TITILER_CMR_S3_AUTH_ACCESS, TITILER_CMR_S3_AUTH_STRATEGY) or in an environment file (.env).
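For example, a minimal .env sketch might look like the following (the TITILER_CMR_S3_AUTH_ACCESS value matches the options described below; the accepted values for TITILER_CMR_S3_AUTH_STRATEGY are defined in settings.py, so the placeholder here is illustrative only):

# .env (illustrative values)
TITILER_CMR_S3_AUTH_ACCESS=external
# TITILER_CMR_S3_AUTH_STRATEGY=<one of the strategies defined in settings.py>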

"},{"location":"#direct-from-s3","title":"Direct from S3","text":"

When running in an AWS context (e.g. Lambda), you should configure the application to access the data directly from S3. You can do this in two ways:

  • Configure an AWS IAM role for your runtime environment that has read access to the NASA buckets so that rasterio/GDAL can find the AWS credentials when reading data
  • Set the EARTHDATA_USERNAME and EARTHDATA_PASSWORD environment variables so that the earthaccess package can issue temporary AWS credentials

Note

Direct S3 access will only work if the application is running in the same AWS region where the data are stored!

"},{"location":"#external-access","title":"External access","text":"

When running outside of the AWS context (e.g. locally), you will need to configure the application to access data over HTTP. You can do this by creating an Earthdata account, configuring your .netrc file with your Earthdata login credentials (which GDAL will find when trying to access data over the network), and setting a few environment variables:

# environment variables for GDAL to read data from NASA over HTTP\nexport GDAL_DISABLE_READDIR_ON_OPEN=YES\nexport CPL_VSIL_CURL_USE_HEAD=FALSE\nexport GDAL_HTTP_COOKIEFILE=/tmp/cookies.txt\nexport GDAL_HTTP_COOKIEJAR=/tmp/cookies.txt\nexport EARTHDATA_USERNAME={your earthdata username}\nexport EARTHDATA_PASSWORD={your earthdata password}\n\n# write your .netrc file to the home directory\necho \"machine urs.earthdata.nasa.gov login ${EARTHDATA_USERNAME} password ${EARTHDATA_PASSWORD}\" > ~/.netrc\n

Note

See NASA's docs for details

"},{"location":"#docker-deployment","title":"Docker deployment","text":"

You can run the application in a docker container using the docker-compose.yml file. The docker container is configured to read the EARTHDATA_USERNAME and EARTHDATA_PASSWORD environment variables, so make sure to set those before starting the docker network.

docker compose up --build \n

The application will be available at this address: http://localhost:8081/api.html

"},{"location":"#local-deployment","title":"Local deployment","text":"

To run the application directly in your local environment, configure it to access data over HTTP, then run it with uvicorn:

TITILER_CMR_S3_AUTH_ACCESS=external uvicorn titiler.cmr.main:app --reload\n

The application will be available at this address: http://localhost:8000/api.html

"},{"location":"#contribution-development","title":"Contribution & Development","text":"

See CONTRIBUTING.md

"},{"location":"#license","title":"License","text":"

See LICENSE

"},{"location":"#authors","title":"Authors","text":"

Created by Development Seed

See contributors for a listing of individual contributors.

"},{"location":"#changes","title":"Changes","text":"

See CHANGES.md.

"},{"location":"benchmark_analysis/","title":"Benchmark analysis","text":"In\u00a0[1]: Copied!
\"\"\"Functions for processing and plotting the time series benchmark results\"\"\"\n
\"\"\"Functions for processing and plotting the time series benchmark results\"\"\" Out[1]:
'Functions for processing and plotting the time series benchmark results'
In\u00a0[2]: Copied!
import json\nfrom collections import defaultdict\nfrom typing import Dict\n
import json from collections import defaultdict from typing import Dict In\u00a0[3]: Copied!
import pandas as pd\nimport plotly.express as px\n
import pandas as pd import plotly.express as px In\u00a0[4]: Copied!
def process_benchmark_data() -> Dict[str, pd.DataFrame]:\n    \"\"\"Read the benchmark json and convert into a dictionary of dataframes\"\"\"\n    with open(\"../benchmark.json\", \"r\") as f:\n        benchmarks = json.load(f)[\"benchmarks\"]\n\n    records = defaultdict(list)\n    for bench in benchmarks:\n        record = {\n            \"mean_time\": bench[\"stats\"][\"mean\"],\n            \"stddev\": bench[\"stats\"][\"stddev\"],\n            **bench[\"extra_info\"],\n            **bench[\"params\"],\n        }\n        record.update(bench[\"params\"])\n        records[bench[\"group\"]].append(record)\n\n    dfs = {\n        group: pd.DataFrame(records).sort_values(by=\"bbox_size\")\n        for group, records in records.items()\n    }\n\n    for group, df in dfs.items():\n        bbox_dims = sorted(\n            df[\"bbox_dims\"].unique(), key=lambda x: float(x.split(\"x\")[0]) * -1\n        )\n\n        df[\"bbox_dims\"] = pd.Categorical(\n            df[\"bbox_dims\"], categories=bbox_dims, ordered=True\n        )\n\n        dfs[group] = df\n\n    return dfs\n
def process_benchmark_data() -> Dict[str, pd.DataFrame]: \"\"\"Read the benchmark json and convert into a dictionary of dataframes\"\"\" with open(\"../benchmark.json\", \"r\") as f: benchmarks = json.load(f)[\"benchmarks\"] records = defaultdict(list) for bench in benchmarks: record = { \"mean_time\": bench[\"stats\"][\"mean\"], \"stddev\": bench[\"stats\"][\"stddev\"], **bench[\"extra_info\"], **bench[\"params\"], } record.update(bench[\"params\"]) records[bench[\"group\"]].append(record) dfs = { group: pd.DataFrame(records).sort_values(by=\"bbox_size\") for group, records in records.items() } for group, df in dfs.items(): bbox_dims = sorted( df[\"bbox_dims\"].unique(), key=lambda x: float(x.split(\"x\")[0]) * -1 ) df[\"bbox_dims\"] = pd.Categorical( df[\"bbox_dims\"], categories=bbox_dims, ordered=True ) dfs[group] = df return dfs In\u00a0[5]: Copied!
def plot_line_with_error_bars(df: pd.DataFrame, **kwargs):\n    \"\"\"Create line plot with vertical error bars\"\"\"\n    fig = px.line(\n        df,\n        x=\"num_timepoints\",\n        y=\"mean_time\",\n        error_y=\"stddev\",\n        labels={\n            \"mean_time\": \"mean response time (seconds)\",\n            \"num_timepoints\": \"number of points in time series\",\n        },\n        **kwargs,\n    )\n\n    return fig\n
def plot_line_with_error_bars(df: pd.DataFrame, **kwargs): \"\"\"Create line plot with vertical error bars\"\"\" fig = px.line( df, x=\"num_timepoints\", y=\"mean_time\", error_y=\"stddev\", labels={ \"mean_time\": \"mean response time (seconds)\", \"num_timepoints\": \"number of points in time series\", }, **kwargs, ) return fig In\u00a0[6]: Copied!
def plot_error_rate_heatmap(\n    df: pd.DataFrame,\n    x: str,\n    y: str,\n    z: str,\n    labels: Dict[str, str],\n    title: str,\n):\n    \"\"\"Plot error rate as a heatmap\"\"\"\n    # Create the pivot table for heatmap\n    data = df[[x, y, z]].pivot(index=y, columns=x, values=z)\n\n    # Create the faceted heatmap using plotly\n    fig = px.imshow(\n        data,\n        labels=labels,\n        title=title,\n    )\n\n    return fig\n
def plot_error_rate_heatmap( df: pd.DataFrame, x: str, y: str, z: str, labels: Dict[str, str], title: str, ): \"\"\"Plot error rate as a heatmap\"\"\" # Create the pivot table for heatmap data = df[[x, y, z]].pivot(index=y, columns=x, values=z) # Create the faceted heatmap using plotly fig = px.imshow( data, labels=labels, title=title, ) return fig In\u00a0[7]: Copied!
# Load and process the data\ndfs = process_benchmark_data()\n
# Load and process the data dfs = process_benchmark_data()
\n---------------------------------------------------------------------------\nFileNotFoundError                         Traceback (most recent call last)\nCell In[7], line 2\n      1 # Load and process the data\n----> 2 dfs = process_benchmark_data()\n\nCell In[4], line 3, in process_benchmark_data()\n      1 def process_benchmark_data() -> Dict[str, pd.DataFrame]:\n      2     \"\"\"Read the benchmark json and convert into a dictionary of dataframes\"\"\"\n----> 3     with open(\"../benchmark.json\", \"r\") as f:\n      4         benchmarks = json.load(f)[\"benchmarks\"]\n      6     records = defaultdict(list)\n\nFile ~/work/titiler-cmr/titiler-cmr/.venv/lib/python3.10/site-packages/IPython/core/interactiveshell.py:324, in _modified_open(file, *args, **kwargs)\n    317 if file in {0, 1, 2}:\n    318     raise ValueError(\n    319         f\"IPython won't let you open fd={file} by default \"\n    320         \"as it is likely to crash IPython. If you know what you are doing, \"\n    321         \"you can use builtins' open.\"\n    322     )\n--> 324 return io_open(file, *args, **kwargs)\n\nFileNotFoundError: [Errno 2] No such file or directory: '../benchmark.json'
"},{"location":"contributing/","title":"Development - Contributing","text":"

Issues and pull requests are more than welcome: github.com/developmentseed/titiler-cmr/issues

dev install

This project uses uv to manage the Python environment and dependencies. To install the package for development, follow these steps:

# install uv\n\n# unix\ncurl -LsSf https://astral.sh/uv/install.sh | sh\n\n# or windows\n# powershell -c \"irm https://astral.sh/uv/install.ps1 | iex\"\n\ngit clone https://github.com/developmentseed/titiler-cmr.git\ncd titiler-cmr\nuv sync --all-extras\n
"},{"location":"contributing/#linting","title":"Linting","text":"

This repo is set up to use pre-commit to run isort, flake8, pydocstring, black (\"uncompromising Python code formatter\"), and mypy when committing new code.

uv run pre-commit install\n
"},{"location":"contributing/#testing","title":"Testing","text":"

You can then run the tests with the following command:

uv run pytest\n

The tests use vcrpy (https://vcrpy.readthedocs.io/en/latest/) to mock API calls with \"pre-recorded\" API responses. When adding new tests that incur actual network traffic, use the @pytest.mark.vcr decorator to indicate that vcrpy should be used, then record the new responses and commit them to the repository.
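As a rough sketch (the test body and URL are illustrative and not taken from the test suite, and it assumes the pytest plugin that provides the @pytest.mark.vcr marker is installed), a cassette-backed test looks like this:

import httpx
import pytest


@pytest.mark.vcr
def test_collection_search_replays_from_cassette():
    # Illustrative request: the first run (with --record-mode new_episodes)
    # records the HTTP response to a cassette; later runs replay it offline.
    response = httpx.get(
        "https://cmr.earthdata.nasa.gov/search/collections.json",
        params={"page_size": 1},
    )
    assert response.status_code == 200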

uv run pytest -v -s --record-mode new_episodes\n
"},{"location":"contributing/#benchmarks","title":"Benchmarks","text":"

tests/test_timeseries_benchmark.py is used to evaluate the performance and limits of /timeseries requests.

To run the benchmarks:

uv run pytest -vv --benchmark-only --benchmark-json benchmark.json\n
"},{"location":"contributing/#documentation","title":"Documentation","text":"

The documentation is generated using mkdocs and is built and deployed to GitHub Pages when new tags are released and on pushes to the develop branch.

To preview the documentation in your browser you can run:

uv run mkdocs serve -o\n
"},{"location":"release-notes/","title":"Changelog","text":"

All notable changes to this project will be documented in this file.

The format is based on Keep a Changelog.

"},{"location":"release-notes/#unreleased","title":"Unreleased","text":""},{"location":"release-notes/#added","title":"Added","text":"
  • Initial implementation of STAC metadata structure
"},{"location":"release-notes/#deprecated","title":"Deprecated","text":"
  • Nothing.
"},{"location":"release-notes/#removed","title":"Removed","text":"
  • Nothing.
"},{"location":"release-notes/#fixed","title":"Fixed","text":"
  • Nothing.
"},{"location":"release-notes/#012","title":"0.1.2","text":""},{"location":"release-notes/#added_1","title":"Added","text":"
  • Support for /timeseries endpoints (#33)
"},{"location":"release-notes/#deprecated_1","title":"Deprecated","text":"
  • Nothing.
"},{"location":"release-notes/#removed_1","title":"Removed","text":"
  • Nothing.
"},{"location":"release-notes/#fixed_1","title":"Fixed","text":"
  • Nothing.
"},{"location":"release-notes/#011","title":"0.1.1","text":""},{"location":"release-notes/#added_2","title":"Added","text":"
  • Add /bbox, /feature, and /statistics endpoints (#30)
"},{"location":"release-notes/#deprecated_2","title":"Deprecated","text":"
  • Nothing.
"},{"location":"release-notes/#removed_2","title":"Removed","text":"
  • Nothing.
"},{"location":"release-notes/#fixed_2","title":"Fixed","text":"
  • Nothing.
"},{"location":"release-notes/#011_1","title":"0.1.1","text":""},{"location":"release-notes/#added_3","title":"Added","text":"

-

Ability to run locally with Earthdata authentication (#28)

"},{"location":"release-notes/#deprecated_3","title":"Deprecated","text":"
  • Nothing.
"},{"location":"release-notes/#removed_3","title":"Removed","text":"
  • Nothing.
"},{"location":"release-notes/#fixed_3","title":"Fixed","text":"
  • Nothing.
"},{"location":"time_series_performance_benchmarks/","title":"Time series performance benchmarks","text":"In\u00a0[1]: Copied!
import benchmark_analysis as ba\n
import benchmark_analysis as ba In\u00a0[2]: Copied!
ba.plot_error_rate_heatmap(\n    df=ba.dfs[\"statistics\"],\n    x=\"num_timepoints\",\n    y=\"bbox_dims\",\n    z=\"error_rate\",\n    labels={\"x\": \"number of time points\", \"y\": \"bbox dimensions\", \"color\": \"error rate\"},\n    title=\"error rate by bbox size and number of time points\",\n)\n
ba.plot_error_rate_heatmap( df=ba.dfs[\"statistics\"], x=\"num_timepoints\", y=\"bbox_dims\", z=\"error_rate\", labels={\"x\": \"number of time points\", \"y\": \"bbox dimensions\", \"color\": \"error rate\"}, title=\"error rate by bbox size and number of time points\", )

In general, the size of the area you want to analyze will have minimal impact on the runtime! This is because titiler.xarray has to read the entire granule into memory before subsetting, so reducing the size of the AOI does not significantly reduce the response time.
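A minimal sketch of the subsetting step (illustrative only, not titiler.xarray's actual code; the file name is hypothetical and the variable/dimension names follow the GAMSSA SST example used elsewhere in these docs):

import xarray as xr

# Open a granule and select a bounding box for one variable. Per the note
# above, the whole granule is read before this selection is applied, so a
# smaller box does not reduce the amount of data that must be read.
ds = xr.open_dataset("granule.nc")  # hypothetical local copy of a granule
subset = ds["analysed_sst"].sel(
    lat=slice(18.857, 31.097),
    lon=slice(-98.676, -81.623),
)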

In\u00a0[3]: Copied!
ba.plot_line_with_error_bars(\n    df=ba.dfs[\"statistics\"].sort_values([\"bbox_size\", \"num_timepoints\"]),\n    color=\"bbox_dims\",\n    title=\"statistics runtime\",\n)\n
ba.plot_line_with_error_bars( df=ba.dfs[\"statistics\"].sort_values([\"bbox_size\", \"num_timepoints\"]), color=\"bbox_dims\", title=\"statistics runtime\", ) In\u00a0[4]: Copied!
for img_size in sorted(ba.dfs[\"gif-timepoints\"][\"img_size\"].unique()):\n    img_size_df = ba.dfs[\"gif-timepoints\"][ba.dfs[\"gif-timepoints\"][\"img_size\"] == img_size]\n    img_dims = img_size_df[\"img_dims\"].unique()[0]\n    fig = ba.plot_error_rate_heatmap(\n        df=img_size_df,\n        x=\"num_timepoints\",\n        y=\"bbox_dims\",\n        z=\"error_rate\",\n        labels={\"x\": \"number of time points\", \"y\": \"bbox dimensions\", \"color\": \"error rate\"},\n        title=f\"image size: {img_dims}\",\n    )\n    fig.show()\n
for img_size in sorted(ba.dfs[\"gif-timepoints\"][\"img_size\"].unique()): img_size_df = ba.dfs[\"gif-timepoints\"][ba.dfs[\"gif-timepoints\"][\"img_size\"] == img_size] img_dims = img_size_df[\"img_dims\"].unique()[0] fig = ba.plot_error_rate_heatmap( df=img_size_df, x=\"num_timepoints\", y=\"bbox_dims\", z=\"error_rate\", labels={\"x\": \"number of time points\", \"y\": \"bbox dimensions\", \"color\": \"error rate\"}, title=f\"image size: {img_dims}\", ) fig.show()

Increasing the size of the area of interest increases the response time, especially for requests for higher-resolution images.

In\u00a0[5]: Copied!
ba.plot_line_with_error_bars(\n    df=ba.dfs[\"gif-timepoints\"].sort_values([\"bbox_size\", \"num_timepoints\"]),\n    color=\"bbox_dims\",\n    facet_row=\"img_dims\",\n)\n
ba.plot_line_with_error_bars( df=ba.dfs[\"gif-timepoints\"].sort_values([\"bbox_size\", \"num_timepoints\"]), color=\"bbox_dims\", facet_row=\"img_dims\", )"},{"location":"time_series_performance_benchmarks/#time-series-performance-benchmarks","title":"Time series performance benchmarks\u00b6","text":"

The titiler-cmr API is deployed as a Lambda function in the SMCE VEDA AWS account. For small time series requests (<500 time points) you can expect a response from any of the endpoints within ~20 seconds. For larger time series requests, you run the risk of hitting Lambda concurrency or timeout limits. This report shows some results from the test_timeseries_benchmarks.py script, which sends many requests while varying the time series length as well as several other parameters that affect runtime.

"},{"location":"time_series_performance_benchmarks/#xarray-backend","title":"xarray backend\u00b6","text":"

The following tests use the GAMSSA 28km SST dataset to evaluate the limits of the /timeseries endpoints for the xarray backend.

"},{"location":"time_series_performance_benchmarks/#statistics","title":"statistics\u00b6","text":"

Under the current deployment configuration, the statistics endpoint can process time series requests with up to ~1000 points. Requests that involve more than 1000 points are likely to fail.

"},{"location":"time_series_performance_benchmarks/#bbox-animations","title":"bbox (animations)\u00b6","text":"

Under the current deployment configuration, the bbox endpoint can reliably process time series requests with up to ~500 points. Requests that involve more than 500 points may fail if the area of interest is very large.

"},{"location":"examples/rasterio_backend_example/","title":"rasterio backend example: HLS","text":"In\u00a0[1]: Copied!
import earthaccess\nimport geojson_pydantic\nimport httpx\nimport json\n\n\nfrom folium import GeoJson, Map, TileLayer\n
import earthaccess import geojson_pydantic import httpx import json from folium import GeoJson, Map, TileLayer In\u00a0[2]: Copied!
# titiler_endpoint = \"http://localhost:8081\"  # docker network endpoint\ntitiler_endpoint = \"https://dev-titiler-cmr.delta-backend.com\"  # deployed endpoint\n
# titiler_endpoint = \"http://localhost:8081\" # docker network endpoint titiler_endpoint = \"https://dev-titiler-cmr.delta-backend.com\" # deployed endpoint In\u00a0[3]: Copied!
datasets = earthaccess.search_datasets(doi=\"10.5067/HLS/HLSL30.002\")\nds = datasets[0]\n\nconcept_id = ds[\"meta\"][\"concept-id\"]\nprint(\"Concept-Id: \", concept_id)\nprint(\"Abstract: \", ds[\"umm\"][\"Abstract\"])\n
datasets = earthaccess.search_datasets(doi=\"10.5067/HLS/HLSL30.002\") ds = datasets[0] concept_id = ds[\"meta\"][\"concept-id\"] print(\"Concept-Id: \", concept_id) print(\"Abstract: \", ds[\"umm\"][\"Abstract\"])
Concept-Id:  C2021957657-LPCLOUD\nAbstract:  The Harmonized Landsat Sentinel-2 (HLS) project provides consistent surface reflectance (SR) and top of atmosphere (TOA) brightness data from a virtual constellation of satellite sensors. The Operational Land Imager (OLI) is housed aboard the joint NASA/USGS Landsat 8 and Landsat 9 satellites, while the Multi-Spectral Instrument (MSI) is mounted aboard Europe\u2019s Copernicus Sentinel-2A and Sentinel-2B satellites. The combined measurement enables global observations of the land every 2\u20133 days at 30-meter (m) spatial resolution. The HLS project uses a set of algorithms to obtain seamless products from OLI and MSI that include atmospheric correction, cloud and cloud-shadow masking, spatial co-registration and common gridding, illumination and view angle normalization, and spectral bandpass adjustment.\r\n\r\nThe HLSL30 product provides 30-m Nadir Bidirectional Reflectance Distribution Function (BRDF)-Adjusted Reflectance (NBAR) and is derived from Landsat 8/9 OLI data products. The HLSS30 and HLSL30 products are gridded to the same resolution and Military Grid Reference System (MGRS)(https://hls.gsfc.nasa.gov/products-description/tiling-system/) tiling system, and thus are \u201cstackable\u201d for time series analysis.\r\n\r\nThe HLSL30 product is provided in Cloud Optimized GeoTIFF (COG) format, and each band is distributed as a separate file. There are 11 bands included in the HLSL30 product along with one quality assessment (QA) band and four angle bands. See the User Guide for a more detailed description of the individual bands provided in the HLSL30 product.\n
In\u00a0[4]: Copied!
import earthaccess\nimport morecantile\n\ntms = morecantile.tms.get(\"WebMercatorQuad\")\n\nbounds = tms.bounds(62, 44, 7)\nxmin, ymin, xmax, ymax = (round(n, 8) for n in bounds)\n\nresults = earthaccess.search_data(\n    bounding_box=(xmin, ymin, xmax, ymax),\n    count=1,\n    concept_id=concept_id,\n    temporal=(\"2024-02-11\", \"2024-02-13\"),\n)\nprint(\"Granules:\")\nprint(results)\nprint()\nprint(\"Example of COGs URL: \")\nfor link in results[0].data_links(access=\"direct\"):\n    print(link)\n
import earthaccess import morecantile tms = morecantile.tms.get(\"WebMercatorQuad\") bounds = tms.bounds(62, 44, 7) xmin, ymin, xmax, ymax = (round(n, 8) for n in bounds) results = earthaccess.search_data( bounding_box=(xmin, ymin, xmax, ymax), count=1, concept_id=concept_id, temporal=(\"2024-02-11\", \"2024-02-13\"), ) print(\"Granules:\") print(results) print() print(\"Example of COGs URL: \") for link in results[0].data_links(access=\"direct\"): print(link)
Granules:\n[Collection: {'EntryTitle': 'HLS Landsat Operational Land Imager Surface Reflectance and TOA Brightness Daily Global 30m v2.0'}\nSpatial coverage: {'HorizontalSpatialDomain': {'Geometry': {'GPolygons': [{'Boundary': {'Points': [{'Longitude': -2.64743819, 'Latitude': 48.6644919}, {'Longitude': -2.21521695, 'Latitude': 49.65006328}, {'Longitude': -3.00027708, 'Latitude': 49.65272281}, {'Longitude': -3.00027162, 'Latitude': 48.66503141}, {'Longitude': -2.64743819, 'Latitude': 48.6644919}]}}]}}}\nTemporal coverage: {'RangeDateTime': {'BeginningDateTime': '2024-02-12T11:05:26.302Z', 'EndingDateTime': '2024-02-12T11:05:50.181Z'}}\nSize(MB): 56.62721920013428\nData: ['https://data.lpdaac.earthdatacloud.nasa.gov/lp-prod-protected/HLSL30.020/HLS.L30.T30UWV.2024043T110526.v2.0/HLS.L30.T30UWV.2024043T110526.v2.0.B02.tif', 'https://data.lpdaac.earthdatacloud.nasa.gov/lp-prod-protected/HLSL30.020/HLS.L30.T30UWV.2024043T110526.v2.0/HLS.L30.T30UWV.2024043T110526.v2.0.B06.tif', 'https://data.lpdaac.earthdatacloud.nasa.gov/lp-prod-protected/HLSL30.020/HLS.L30.T30UWV.2024043T110526.v2.0/HLS.L30.T30UWV.2024043T110526.v2.0.B01.tif', 'https://data.lpdaac.earthdatacloud.nasa.gov/lp-prod-protected/HLSL30.020/HLS.L30.T30UWV.2024043T110526.v2.0/HLS.L30.T30UWV.2024043T110526.v2.0.SAA.tif', 'https://data.lpdaac.earthdatacloud.nasa.gov/lp-prod-protected/HLSL30.020/HLS.L30.T30UWV.2024043T110526.v2.0/HLS.L30.T30UWV.2024043T110526.v2.0.B07.tif', 'https://data.lpdaac.earthdatacloud.nasa.gov/lp-prod-protected/HLSL30.020/HLS.L30.T30UWV.2024043T110526.v2.0/HLS.L30.T30UWV.2024043T110526.v2.0.SZA.tif', 'https://data.lpdaac.earthdatacloud.nasa.gov/lp-prod-protected/HLSL30.020/HLS.L30.T30UWV.2024043T110526.v2.0/HLS.L30.T30UWV.2024043T110526.v2.0.B03.tif', 'https://data.lpdaac.earthdatacloud.nasa.gov/lp-prod-protected/HLSL30.020/HLS.L30.T30UWV.2024043T110526.v2.0/HLS.L30.T30UWV.2024043T110526.v2.0.Fmask.tif', 'https://data.lpdaac.earthdatacloud.nasa.gov/lp-prod-protected/HLSL30.020/HLS.L30.T30UWV.2024043T110526.v2.0/HLS.L30.T30UWV.2024043T110526.v2.0.B04.tif', 'https://data.lpdaac.earthdatacloud.nasa.gov/lp-prod-protected/HLSL30.020/HLS.L30.T30UWV.2024043T110526.v2.0/HLS.L30.T30UWV.2024043T110526.v2.0.B05.tif', 'https://data.lpdaac.earthdatacloud.nasa.gov/lp-prod-protected/HLSL30.020/HLS.L30.T30UWV.2024043T110526.v2.0/HLS.L30.T30UWV.2024043T110526.v2.0.VAA.tif', 'https://data.lpdaac.earthdatacloud.nasa.gov/lp-prod-protected/HLSL30.020/HLS.L30.T30UWV.2024043T110526.v2.0/HLS.L30.T30UWV.2024043T110526.v2.0.VZA.tif', 'https://data.lpdaac.earthdatacloud.nasa.gov/lp-prod-protected/HLSL30.020/HLS.L30.T30UWV.2024043T110526.v2.0/HLS.L30.T30UWV.2024043T110526.v2.0.B11.tif', 'https://data.lpdaac.earthdatacloud.nasa.gov/lp-prod-protected/HLSL30.020/HLS.L30.T30UWV.2024043T110526.v2.0/HLS.L30.T30UWV.2024043T110526.v2.0.B10.tif', 'https://data.lpdaac.earthdatacloud.nasa.gov/lp-prod-protected/HLSL30.020/HLS.L30.T30UWV.2024043T110526.v2.0/HLS.L30.T30UWV.2024043T110526.v2.0.B09.tif']]\n\nExample of COGs URL: 
\ns3://lp-prod-protected/HLSL30.020/HLS.L30.T30UWV.2024043T110526.v2.0/HLS.L30.T30UWV.2024043T110526.v2.0.B02.tif\ns3://lp-prod-protected/HLSL30.020/HLS.L30.T30UWV.2024043T110526.v2.0/HLS.L30.T30UWV.2024043T110526.v2.0.B06.tif\ns3://lp-prod-protected/HLSL30.020/HLS.L30.T30UWV.2024043T110526.v2.0/HLS.L30.T30UWV.2024043T110526.v2.0.B01.tif\ns3://lp-prod-protected/HLSL30.020/HLS.L30.T30UWV.2024043T110526.v2.0/HLS.L30.T30UWV.2024043T110526.v2.0.SAA.tif\ns3://lp-prod-protected/HLSL30.020/HLS.L30.T30UWV.2024043T110526.v2.0/HLS.L30.T30UWV.2024043T110526.v2.0.B07.tif\ns3://lp-prod-protected/HLSL30.020/HLS.L30.T30UWV.2024043T110526.v2.0/HLS.L30.T30UWV.2024043T110526.v2.0.SZA.tif\ns3://lp-prod-protected/HLSL30.020/HLS.L30.T30UWV.2024043T110526.v2.0/HLS.L30.T30UWV.2024043T110526.v2.0.B03.tif\ns3://lp-prod-protected/HLSL30.020/HLS.L30.T30UWV.2024043T110526.v2.0/HLS.L30.T30UWV.2024043T110526.v2.0.Fmask.tif\ns3://lp-prod-protected/HLSL30.020/HLS.L30.T30UWV.2024043T110526.v2.0/HLS.L30.T30UWV.2024043T110526.v2.0.B04.tif\ns3://lp-prod-protected/HLSL30.020/HLS.L30.T30UWV.2024043T110526.v2.0/HLS.L30.T30UWV.2024043T110526.v2.0.B05.tif\ns3://lp-prod-protected/HLSL30.020/HLS.L30.T30UWV.2024043T110526.v2.0/HLS.L30.T30UWV.2024043T110526.v2.0.VAA.tif\ns3://lp-prod-protected/HLSL30.020/HLS.L30.T30UWV.2024043T110526.v2.0/HLS.L30.T30UWV.2024043T110526.v2.0.VZA.tif\ns3://lp-prod-protected/HLSL30.020/HLS.L30.T30UWV.2024043T110526.v2.0/HLS.L30.T30UWV.2024043T110526.v2.0.B11.tif\ns3://lp-prod-protected/HLSL30.020/HLS.L30.T30UWV.2024043T110526.v2.0/HLS.L30.T30UWV.2024043T110526.v2.0.B10.tif\ns3://lp-prod-protected/HLSL30.020/HLS.L30.T30UWV.2024043T110526.v2.0/HLS.L30.T30UWV.2024043T110526.v2.0.B09.tif\n
In\u00a0[5]: Copied!
from titiler.cmr.backend import CMRBackend\nfrom titiler.cmr.reader import MultiFilesBandsReader\n\nwith CMRBackend(reader=MultiFilesBandsReader) as backend:\n    assets = backend.assets_for_tile(\n        x=62,\n        y=44,\n        z=7,\n        bands_regex=\"B[0-9][0-9]\",\n        concept_id=concept_id,\n        temporal=(\"2024-02-11\", \"2024-02-13\")\n    )\n\nprint(assets[0])\n
from titiler.cmr.backend import CMRBackend from titiler.cmr.reader import MultiFilesBandsReader with CMRBackend(reader=MultiFilesBandsReader) as backend: assets = backend.assets_for_tile( x=62, y=44, z=7, bands_regex=\"B[0-9][0-9]\", concept_id=concept_id, temporal=(\"2024-02-11\", \"2024-02-13\") ) print(assets[0])
{'url': {'B02': 's3://lp-prod-protected/HLSL30.020/HLS.L30.T30UWV.2024043T110526.v2.0/HLS.L30.T30UWV.2024043T110526.v2.0.B02.tif', 'B06': 's3://lp-prod-protected/HLSL30.020/HLS.L30.T30UWV.2024043T110526.v2.0/HLS.L30.T30UWV.2024043T110526.v2.0.B06.tif', 'B01': 's3://lp-prod-protected/HLSL30.020/HLS.L30.T30UWV.2024043T110526.v2.0/HLS.L30.T30UWV.2024043T110526.v2.0.B01.tif', 'B07': 's3://lp-prod-protected/HLSL30.020/HLS.L30.T30UWV.2024043T110526.v2.0/HLS.L30.T30UWV.2024043T110526.v2.0.B07.tif', 'B03': 's3://lp-prod-protected/HLSL30.020/HLS.L30.T30UWV.2024043T110526.v2.0/HLS.L30.T30UWV.2024043T110526.v2.0.B03.tif', 'B04': 's3://lp-prod-protected/HLSL30.020/HLS.L30.T30UWV.2024043T110526.v2.0/HLS.L30.T30UWV.2024043T110526.v2.0.B04.tif', 'B05': 's3://lp-prod-protected/HLSL30.020/HLS.L30.T30UWV.2024043T110526.v2.0/HLS.L30.T30UWV.2024043T110526.v2.0.B05.tif', 'B11': 's3://lp-prod-protected/HLSL30.020/HLS.L30.T30UWV.2024043T110526.v2.0/HLS.L30.T30UWV.2024043T110526.v2.0.B11.tif', 'B10': 's3://lp-prod-protected/HLSL30.020/HLS.L30.T30UWV.2024043T110526.v2.0/HLS.L30.T30UWV.2024043T110526.v2.0.B10.tif', 'B09': 's3://lp-prod-protected/HLSL30.020/HLS.L30.T30UWV.2024043T110526.v2.0/HLS.L30.T30UWV.2024043T110526.v2.0.B09.tif'}, 'provider': 'LPCLOUD'}\n
In\u00a0[6]: Copied!
from IPython.display import IFrame\nIFrame(f\"{titiler_endpoint}/api.html\", 900,500)\n
from IPython.display import IFrame IFrame(f\"{titiler_endpoint}/api.html\", 900,500) Out[6]: In\u00a0[7]: Copied!
r = httpx.get(\n    f\"{titiler_endpoint}/WebMercatorQuad/tilejson.json\",\n    params = (\n        (\"concept_id\", concept_id),\n        # Datetime in form of `start_date/end_date`\n        (\"datetime\", \"2024-10-01T00:00:00Z/2024-10-10T23:59:59Z\"),\n        # We know that the HLS collection dataset is stored as File per Band\n        # so we need to pass a `band_regex` option to assign `bands` to each URL\n        (\"bands_regex\", \"B[0-9][0-9]\"),\n        # titiler-cmr can work with both Zarr and COG dataset\n        # but we need to tell the endpoints in advance which backend\n        # to use\n        (\"backend\", \"rasterio\"),\n        # True Color Image B04,B03,B02\n        (\"bands\", \"B04\"),\n        (\"bands\", \"B03\"),\n        (\"bands\", \"B02\"),\n        # The data is in type of Uint16 so we need to apply some\n        # rescaling/color_formula in order to create PNGs\n        (\"color_formula\", \"Gamma RGB 3.5 Saturation 1.7 Sigmoidal RGB 15 0.35\"),\n        # We need to set min/max zoom because we don't want to use lowerzoom level (e.g 0)\n        # which will results in useless large scale query\n        (\"minzoom\", 8),\n        (\"maxzoom\", 13),\n    )\n).json()\n\nprint(r)\n
r = httpx.get( f\"{titiler_endpoint}/WebMercatorQuad/tilejson.json\", params = ( (\"concept_id\", concept_id), # Datetime in form of `start_date/end_date` (\"datetime\", \"2024-10-01T00:00:00Z/2024-10-10T23:59:59Z\"), # We know that the HLS collection dataset is stored as File per Band # so we need to pass a `band_regex` option to assign `bands` to each URL (\"bands_regex\", \"B[0-9][0-9]\"), # titiler-cmr can work with both Zarr and COG dataset # but we need to tell the endpoints in advance which backend # to use (\"backend\", \"rasterio\"), # True Color Image B04,B03,B02 (\"bands\", \"B04\"), (\"bands\", \"B03\"), (\"bands\", \"B02\"), # The data is in type of Uint16 so we need to apply some # rescaling/color_formula in order to create PNGs (\"color_formula\", \"Gamma RGB 3.5 Saturation 1.7 Sigmoidal RGB 15 0.35\"), # We need to set min/max zoom because we don't want to use lowerzoom level (e.g 0) # which will results in useless large scale query (\"minzoom\", 8), (\"maxzoom\", 13), ) ).json() print(r)
{'tilejson': '2.2.0', 'version': '1.0.0', 'scheme': 'xyz', 'tiles': ['https://dev-titiler-cmr.delta-backend.com/tiles/WebMercatorQuad/{z}/{x}/{y}@1x?concept_id=C2021957657-LPCLOUD&datetime=2024-10-01T00%3A00%3A00Z%2F2024-10-10T23%3A59%3A59Z&bands_regex=B%5B0-9%5D%5B0-9%5D&backend=rasterio&bands=B04&bands=B03&bands=B02&color_formula=Gamma+RGB+3.5+Saturation+1.7+Sigmoidal+RGB+15+0.35'], 'minzoom': 8, 'maxzoom': 13, 'bounds': [-180.0, -90.0, 180.0, 90.0], 'center': [0.0, 0.0, 8]}\n
In\u00a0[8]: Copied!
bounds = r[\"bounds\"]\nm = Map(\n    location=(47.590266824611675, -91.03729840730689),\n    zoom_start=r[\"maxzoom\"] - 2\n)\n\nTileLayer(\n    tiles=r[\"tiles\"][0],\n    opacity=1,\n    attr=\"NASA\",\n).add_to(m)\nm\n
bounds = r[\"bounds\"] m = Map( location=(47.590266824611675, -91.03729840730689), zoom_start=r[\"maxzoom\"] - 2 ) TileLayer( tiles=r[\"tiles\"][0], opacity=1, attr=\"NASA\", ).add_to(m) m Out[8]: Make this Notebook Trusted to load map: File -> Trust Notebook In\u00a0[9]: Copied!
r = httpx.get(\n    f\"{titiler_endpoint}/WebMercatorQuad/tilejson.json\",\n    params = (\n        (\"concept_id\", concept_id),\n        # Datetime in form of `start_date/end_date`\n        (\"datetime\", \"2024-06-20T00:00:00Z/2024-06-27T23:59:59Z\"),\n        # We know that the HLS collection dataset is stored as File per Band\n        # so we need to pass a `band_regex` option to assign `bands` to each URL\n        (\"bands_regex\", \"B[0-9][0-9]\"),\n        # titiler-cmr can work with both Zarr and COG dataset\n        # but we need to tell the endpoints in advance which backend\n        # to use\n        (\"backend\", \"rasterio\"),\n        # NDVI\n        (\"expression\", \"(B05-B04)/(B05+B04)\"),\n        # Need red (B04) and nir (B05) for NDVI\n        (\"bands\", \"B05\"),\n        (\"bands\", \"B04\"),\n        # The data is in type of Uint16 so we need to apply some\n        # rescaling/color_formula in order to create PNGs\n        (\"colormap_name\", \"viridis\"),\n        (\"rescale\", \"-1,1\"),\n        # We need to set min/max zoom because we don't want to use lowerzoom level (e.g 0)\n        # which will results in useless large scale query\n        (\"minzoom\", 8),\n        (\"maxzoom\", 13),\n    )\n).json()\n\nm = Map(\n    location=(47.9221313337365, -91.65432884883238),\n    zoom_start=r[\"maxzoom\"] - 1\n)\n\n\nTileLayer(\n    tiles=r[\"tiles\"][0],\n    opacity=1,\n    attr=\"NASA\",\n).add_to(m)\n\nm\n
r = httpx.get( f\"{titiler_endpoint}/WebMercatorQuad/tilejson.json\", params = ( (\"concept_id\", concept_id), # Datetime in form of `start_date/end_date` (\"datetime\", \"2024-06-20T00:00:00Z/2024-06-27T23:59:59Z\"), # We know that the HLS collection dataset is stored as File per Band # so we need to pass a `band_regex` option to assign `bands` to each URL (\"bands_regex\", \"B[0-9][0-9]\"), # titiler-cmr can work with both Zarr and COG dataset # but we need to tell the endpoints in advance which backend # to use (\"backend\", \"rasterio\"), # NDVI (\"expression\", \"(B05-B04)/(B05+B04)\"), # Need red (B04) and nir (B05) for NDVI (\"bands\", \"B05\"), (\"bands\", \"B04\"), # The data is in type of Uint16 so we need to apply some # rescaling/color_formula in order to create PNGs (\"colormap_name\", \"viridis\"), (\"rescale\", \"-1,1\"), # We need to set min/max zoom because we don't want to use lowerzoom level (e.g 0) # which will results in useless large scale query (\"minzoom\", 8), (\"maxzoom\", 13), ) ).json() m = Map( location=(47.9221313337365, -91.65432884883238), zoom_start=r[\"maxzoom\"] - 1 ) TileLayer( tiles=r[\"tiles\"][0], opacity=1, attr=\"NASA\", ).add_to(m) m Out[9]: Make this Notebook Trusted to load map: File -> Trust Notebook In\u00a0[10]: Copied!
geojson = {\n  \"type\": \"FeatureCollection\",\n  \"features\": [\n    {\n      \"type\": \"Feature\",\n      \"properties\": {},\n      \"geometry\": {\n        \"coordinates\": [\n          [\n            [\n              -91.65432884883238,\n              47.9221313337365\n            ],\n            [\n              -91.65432884883238,\n              47.86503396133904\n            ],\n            [\n              -91.53842043960762,\n              47.86503396133904\n            ],\n            [\n              -91.53842043960762,\n              47.9221313337365\n            ],\n            [\n              -91.65432884883238,\n              47.9221313337365\n            ]\n          ]\n        ],\n        \"type\": \"Polygon\"\n      }\n    }\n  ]\n}\n
geojson = { \"type\": \"FeatureCollection\", \"features\": [ { \"type\": \"Feature\", \"properties\": {}, \"geometry\": { \"coordinates\": [ [ [ -91.65432884883238, 47.9221313337365 ], [ -91.65432884883238, 47.86503396133904 ], [ -91.53842043960762, 47.86503396133904 ], [ -91.53842043960762, 47.9221313337365 ], [ -91.65432884883238, 47.9221313337365 ] ] ], \"type\": \"Polygon\" } } ] } In\u00a0[11]: Copied!
import json\n\nr = httpx.post(\n    f\"{titiler_endpoint}/statistics\",\n    params=(\n        (\"concept_id\", concept_id),\n        # Datetime in form of `start_date/end_date`\n        (\"datetime\", \"2024-07-01T00:00:00Z/2024-07-10T23:59:59Z\"),\n        # We know that the HLS collection dataset is stored as File per Band\n        # so we need to pass a `band_regex` option to assign `bands` to each URL\n        (\"bands_regex\", \"B[0-9][0-9]\"),\n        # titiler-cmr can work with both Zarr and COG dataset\n        # but we need to tell the endpoints in advance which backend\n        # to use\n        (\"backend\", \"rasterio\"),\n        # NDVI\n        (\"expression\", \"(B05-B04)/(B05+B04)\"),\n        # Need red (B04) and nir (B05) for NDVI\n        (\"bands\", \"B05\"),\n        (\"bands\", \"B04\"),\n    ),\n    json=geojson,\n    timeout=30,\n).json()\n\nprint(json.dumps(r, indent=2))\n
import json r = httpx.post( f\"{titiler_endpoint}/statistics\", params=( (\"concept_id\", concept_id), # Datetime in form of `start_date/end_date` (\"datetime\", \"2024-07-01T00:00:00Z/2024-07-10T23:59:59Z\"), # We know that the HLS collection dataset is stored as File per Band # so we need to pass a `band_regex` option to assign `bands` to each URL (\"bands_regex\", \"B[0-9][0-9]\"), # titiler-cmr can work with both Zarr and COG dataset # but we need to tell the endpoints in advance which backend # to use (\"backend\", \"rasterio\"), # NDVI (\"expression\", \"(B05-B04)/(B05+B04)\"), # Need red (B04) and nir (B05) for NDVI (\"bands\", \"B05\"), (\"bands\", \"B04\"), ), json=geojson, timeout=30, ).json() print(json.dumps(r, indent=2))
{\n  \"type\": \"FeatureCollection\",\n  \"features\": [\n    {\n      \"type\": \"Feature\",\n      \"geometry\": {\n        \"type\": \"Polygon\",\n        \"coordinates\": [\n          [\n            [\n              -91.65432884883238,\n              47.9221313337365\n            ],\n            [\n              -91.65432884883238,\n              47.86503396133904\n            ],\n            [\n              -91.53842043960762,\n              47.86503396133904\n            ],\n            [\n              -91.53842043960762,\n              47.9221313337365\n            ],\n            [\n              -91.65432884883238,\n              47.9221313337365\n            ]\n          ]\n        ]\n      },\n      \"properties\": {\n        \"statistics\": {\n          \"(B05-B04)/(B05+B04)\": {\n            \"min\": -75.4,\n            \"max\": 26.6,\n            \"mean\": 0.5238783261952482,\n            \"count\": 57304.8046875,\n            \"sum\": 30020.745162633113,\n            \"std\": 0.6052277569586431,\n            \"median\": 0.6041512231282431,\n            \"majority\": 0.75,\n            \"minority\": -75.4,\n            \"unique\": 47613.0,\n            \"histogram\": [\n              [\n                1,\n                0,\n                2,\n                1,\n                0,\n                0,\n                16,\n                57764,\n                12,\n                2\n              ],\n              [\n                -75.4,\n                -65.2,\n                -55.00000000000001,\n                -44.80000000000001,\n                -34.60000000000001,\n                -24.400000000000006,\n                -14.20000000000001,\n                -4.000000000000014,\n                6.199999999999989,\n                16.39999999999999,\n                26.6\n              ]\n            ],\n            \"valid_percent\": 100.0,\n            \"masked_pixels\": 0.0,\n            \"valid_pixels\": 57798.0,\n            \"percentile_2\": 0.04382638010956595,\n            \"percentile_98\": 0.8685282140779523\n          }\n        }\n      }\n    }\n  ]\n}\n
In\u00a0[\u00a0]: Copied!
\n
"},{"location":"examples/rasterio_backend_example/#rasterio-backend-example-hls","title":"rasterio backend example: HLS\u00b6","text":"

The Harmonized Landsat Sentinel-2 dataset is available in two collections in CMR. This example will use data from the HLSL30.002 (Landsat) dataset.

"},{"location":"examples/rasterio_backend_example/#requirements","title":"Requirements\u00b6","text":"

To run some of the cells in this notebook, you will need to install a few packages:

  • earthaccess
  • folium
  • httpx

!pip install folium httpx earthaccess

"},{"location":"examples/rasterio_backend_example/#identify-the-dataset","title":"Identify the dataset\u00b6","text":"

You can find the HLSL30.002 dataset using the earthaccess.search_datasets function.

"},{"location":"examples/rasterio_backend_example/#examine-a-granule","title":"Examine a granule\u00b6","text":"

Each granule contains the data for a single point in time for an MGRS tile.

"},{"location":"examples/rasterio_backend_example/#demonstrate-assets_for_tile-method","title":"Demonstrate assets_for_tile method\u00b6","text":"

While rendering xyz tile images, titiler-cmr searches for assets using the assets_for_tile method, which converts the xyz tile extent into a bounding box.
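Conceptually, that conversion is the same kind of tile-to-bounds lookup that morecantile provides, as used in the granule search cell above:

import morecantile

tms = morecantile.tms.get("WebMercatorQuad")
# lon/lat bounding box covered by the tile at x=62, y=44, zoom=7
print(tms.bounds(62, 44, 7))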

"},{"location":"examples/rasterio_backend_example/#titilercmr-api-documentation","title":"titiler.cmr API documentation\u00b6","text":""},{"location":"examples/rasterio_backend_example/#display-tiles-in-an-interactive-map","title":"Display tiles in an interactive map\u00b6","text":"

The /tilejson.json endpoint will provide a parameterized xyz tile URL that can be added to an interactive map.

"},{"location":"examples/rasterio_backend_example/#render-ndvi-using-the-expression-parameter","title":"Render NDVI using the expression parameter\u00b6","text":"

The expression parameter can be used to render images from an expression of a combination of the individual bands.

"},{"location":"examples/rasterio_backend_example/#geojson-statistics","title":"GeoJSON Statistics\u00b6","text":"

The /statistics endpoint can be used to get summary statistics for a geojson Feature or FeatureCollection.

"},{"location":"examples/time_series_example/","title":"time series API","text":"In\u00a0[1]: Copied!
from IPython.display import IFrame\n\n# if running titiler-cmr in the docker network\n# titiler_endpoint = \"http://localhost:8081\"\n\n# titiler-cmr-staging deployment\ntitiler_endpoint = \"https://dev-titiler-cmr.delta-backend.com\"\n\nIFrame(f\"{titiler_endpoint}/api.html#Timeseries\", 900, 500)\n
from IPython.display import IFrame # if running titiler-cmr in the docker network # titiler_endpoint = \"http://localhost:8081\" # titiler-cmr-staging deployment titiler_endpoint = \"https://dev-titiler-cmr.delta-backend.com\" IFrame(f\"{titiler_endpoint}/api.html#Timeseries\", 900, 500) Out[1]: In\u00a0[2]: Copied!
import json\nfrom datetime import datetime\n\nimport httpx\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom folium import LayerControl, Map, TileLayer\nfrom geojson_pydantic import Feature, Polygon\nfrom IPython.display import Image, display\n
import json from datetime import datetime import httpx import matplotlib.pyplot as plt import numpy as np from folium import LayerControl, Map, TileLayer from geojson_pydantic import Feature, Polygon from IPython.display import Image, display In\u00a0[3]: Copied!
concept_id = \"C2036881735-POCLOUD\"\n
concept_id = \"C2036881735-POCLOUD\"

The /timeseries GET endpoint is useful for demonstrating how the timeseries family of endpoints constructs sub-requests. It returns the list of titiler.cmr query parameters (datetime and concept_id) that will be used to generate the timeseries results.

In\u00a0[4]: Copied!
response = httpx.get(\n    f\"{titiler_endpoint}/timeseries\",\n    params={\n        \"concept_id\": concept_id,\n        \"datetime\": \"2024-10-01T00:00:01Z/2024-10-05T00:00:01Z\",\n    },\n    timeout=None,\n).json()\n\nprint(json.dumps(response, indent=2))\n
response = httpx.get( f\"{titiler_endpoint}/timeseries\", params={ \"concept_id\": concept_id, \"datetime\": \"2024-10-01T00:00:01Z/2024-10-05T00:00:01Z\", }, timeout=None, ).json() print(json.dumps(response, indent=2))
[\n  {\n    \"concept_id\": \"C2036881735-POCLOUD\",\n    \"datetime\": \"2024-10-01T12:00:00+00:00\"\n  },\n  {\n    \"concept_id\": \"C2036881735-POCLOUD\",\n    \"datetime\": \"2024-10-02T12:00:00+00:00\"\n  },\n  {\n    \"concept_id\": \"C2036881735-POCLOUD\",\n    \"datetime\": \"2024-10-03T12:00:00+00:00\"\n  },\n  {\n    \"concept_id\": \"C2036881735-POCLOUD\",\n    \"datetime\": \"2024-10-04T12:00:00+00:00\"\n  },\n  {\n    \"concept_id\": \"C2036881735-POCLOUD\",\n    \"datetime\": \"2024-10-05T12:00:00+00:00\"\n  }\n]\n
In\u00a0[5]: Copied!
response = httpx.get(\n    f\"{titiler_endpoint}/timeseries\",\n    params={\n        \"concept_id\": concept_id,\n        \"datetime\": \"2024-10-01T00:00:01Z/2024-10-30T00:00:01Z\",\n        \"step\": \"P1W\",\n        \"temporal_mode\": \"point\",\n    }\n).json()\n\nprint(json.dumps(response, indent=2))\n
response = httpx.get( f\"{titiler_endpoint}/timeseries\", params={ \"concept_id\": concept_id, \"datetime\": \"2024-10-01T00:00:01Z/2024-10-30T00:00:01Z\", \"step\": \"P1W\", \"temporal_mode\": \"point\", } ).json() print(json.dumps(response, indent=2))
[\n  {\n    \"concept_id\": \"C2036881735-POCLOUD\",\n    \"datetime\": \"2024-10-01T00:00:01+00:00\"\n  },\n  {\n    \"concept_id\": \"C2036881735-POCLOUD\",\n    \"datetime\": \"2024-10-08T00:00:01+00:00\"\n  },\n  {\n    \"concept_id\": \"C2036881735-POCLOUD\",\n    \"datetime\": \"2024-10-15T00:00:01+00:00\"\n  },\n  {\n    \"concept_id\": \"C2036881735-POCLOUD\",\n    \"datetime\": \"2024-10-22T00:00:01+00:00\"\n  },\n  {\n    \"concept_id\": \"C2036881735-POCLOUD\",\n    \"datetime\": \"2024-10-29T00:00:01+00:00\"\n  }\n]\n
In\u00a0[6]: Copied!
response = httpx.get(\n    f\"{titiler_endpoint}/timeseries\",\n    params={\n        \"concept_id\": concept_id,\n        \"datetime\": \"2024-10-01T00:00:01Z/2024-10-30T00:00:01Z\",\n        \"step\": \"P1W\",\n        \"temporal_mode\": \"interval\",\n    }\n).json()\n\nprint(json.dumps(response, indent=2))\n
response = httpx.get( f\"{titiler_endpoint}/timeseries\", params={ \"concept_id\": concept_id, \"datetime\": \"2024-10-01T00:00:01Z/2024-10-30T00:00:01Z\", \"step\": \"P1W\", \"temporal_mode\": \"interval\", } ).json() print(json.dumps(response, indent=2))
[\n  {\n    \"concept_id\": \"C2036881735-POCLOUD\",\n    \"datetime\": \"2024-10-01T00:00:01+00:00/2024-10-08T00:00:00+00:00\"\n  },\n  {\n    \"concept_id\": \"C2036881735-POCLOUD\",\n    \"datetime\": \"2024-10-08T00:00:01+00:00/2024-10-15T00:00:00+00:00\"\n  },\n  {\n    \"concept_id\": \"C2036881735-POCLOUD\",\n    \"datetime\": \"2024-10-15T00:00:01+00:00/2024-10-22T00:00:00+00:00\"\n  },\n  {\n    \"concept_id\": \"C2036881735-POCLOUD\",\n    \"datetime\": \"2024-10-22T00:00:01+00:00/2024-10-29T00:00:00+00:00\"\n  },\n  {\n    \"concept_id\": \"C2036881735-POCLOUD\",\n    \"datetime\": \"2024-10-29T00:00:01+00:00/2024-10-30T00:00:01+00:00\"\n  }\n]\n
In\u00a0[7]: Copied!
response = httpx.get(\n    f\"{titiler_endpoint}/timeseries\",\n    params={\n        \"concept_id\": concept_id,\n        \"datetime\": \",\".join(\n            [\"2024-10-01T00:00:01Z\", \"2024-10-07T00:00:01Z/2024-10-09T23:59:59Z\"]\n        ),\n    }\n).json()\n\nprint(json.dumps(response, indent=2))\n
response = httpx.get( f\"{titiler_endpoint}/timeseries\", params={ \"concept_id\": concept_id, \"datetime\": \",\".join( [\"2024-10-01T00:00:01Z\", \"2024-10-07T00:00:01Z/2024-10-09T23:59:59Z\"] ), } ).json() print(json.dumps(response, indent=2))
[\n  {\n    \"concept_id\": \"C2036881735-POCLOUD\",\n    \"datetime\": \"2024-10-01T00:00:01+00:00\"\n  },\n  {\n    \"concept_id\": \"C2036881735-POCLOUD\",\n    \"datetime\": \"2024-10-07T12:00:00+00:00\"\n  },\n  {\n    \"concept_id\": \"C2036881735-POCLOUD\",\n    \"datetime\": \"2024-10-08T12:00:00+00:00\"\n  },\n  {\n    \"concept_id\": \"C2036881735-POCLOUD\",\n    \"datetime\": \"2024-10-09T12:00:00+00:00\"\n  }\n]\n
In\u00a0[8]: Copied!
minx, miny, maxx, maxy = -180, -90, 180, 90\nrequest = httpx.get(\n    f\"{titiler_endpoint}/timeseries/bbox/{minx},{miny},{maxx},{maxy}.gif\",\n    params={\n        \"concept_id\": concept_id,\n        \"datetime\": \"2023-11-01T00:00:01Z/2024-10-30T23:59:59Z\",\n        \"step\": \"P2W\",\n        \"temporal_mode\": \"point\",\n        \"variable\": \"analysed_sst\",\n        \"backend\": \"xarray\",\n        \"colormap_name\": \"thermal\",\n        \"rescale\": [[273, 315]],\n    },\n    timeout=None,\n)\ndisplay(Image(request.content))\n
minx, miny, maxx, maxy = -180, -90, 180, 90 request = httpx.get( f\"{titiler_endpoint}/timeseries/bbox/{minx},{miny},{maxx},{maxy}.gif\", params={ \"concept_id\": concept_id, \"datetime\": \"2023-11-01T00:00:01Z/2024-10-30T23:59:59Z\", \"step\": \"P2W\", \"temporal_mode\": \"point\", \"variable\": \"analysed_sst\", \"backend\": \"xarray\", \"colormap_name\": \"thermal\", \"rescale\": [[273, 315]], }, timeout=None, ) display(Image(request.content)) In\u00a0[9]: Copied!
minx, miny, maxx, maxy = -91.464,47.353,-90.466,47.974\nrequest = httpx.get(\n    f\"{titiler_endpoint}/timeseries/bbox/{minx},{miny},{maxx},{maxy}/512x512.gif\",\n    params={\n        \"concept_id\": \"C2021957657-LPCLOUD\",\n        \"datetime\": \"2024-01-01T00:00:00Z/2024-11-30T00:00:00Z\",\n        \"step\": \"P1W\",\n        \"temporal_mode\": \"interval\",\n        \"backend\": \"rasterio\",\n        \"bands_regex\":  \"B[0-9][0-9]\",\n        \"bands\": [\"B04\", \"B03\", \"B02\"],\n        \"color_formula\": \"Gamma RGB 3.5 Saturation 1.7 Sigmoidal RGB 15 0.35\",\n        \"fps\": 5,\n    },\n    timeout=None,\n)\ndisplay(Image(request.content))\n
minx, miny, maxx, maxy = -91.464,47.353,-90.466,47.974 request = httpx.get( f\"{titiler_endpoint}/timeseries/bbox/{minx},{miny},{maxx},{maxy}/512x512.gif\", params={ \"concept_id\": \"C2021957657-LPCLOUD\", \"datetime\": \"2024-01-01T00:00:00Z/2024-11-30T00:00:00Z\", \"step\": \"P1W\", \"temporal_mode\": \"interval\", \"backend\": \"rasterio\", \"bands_regex\": \"B[0-9][0-9]\", \"bands\": [\"B04\", \"B03\", \"B02\"], \"color_formula\": \"Gamma RGB 3.5 Saturation 1.7 Sigmoidal RGB 15 0.35\", \"fps\": 5, }, timeout=None, ) display(Image(request.content)) In\u00a0[10]: Copied!
%%time\nminx, miny, maxx, maxy = -98.676, 18.857, -81.623, 31.097\ngeojson = Feature(\n    type=\"Feature\",\n    geometry=Polygon.from_bounds(minx, miny, maxx, maxy),\n    properties={},\n)\nrequest = httpx.post(\n    f\"{titiler_endpoint}/timeseries/statistics\",\n    params={\n        \"concept_id\": concept_id,\n        \"datetime\": \"2022-02-01T00:00:01Z/2024-10-30T23:59:59Z\",\n        \"step\": \"P1D\",\n        \"temporal_mode\": \"point\",\n        \"variable\": \"analysed_sst\",\n        \"backend\": \"xarray\",\n    },\n    json=geojson.model_dump(exclude_none=True),\n    timeout=None,\n)\nrequest.raise_for_status()\nresponse = request.json()\n
%%time minx, miny, maxx, maxy = -98.676, 18.857, -81.623, 31.097 geojson = Feature( type=\"Feature\", geometry=Polygon.from_bounds(minx, miny, maxx, maxy), properties={}, ) request = httpx.post( f\"{titiler_endpoint}/timeseries/statistics\", params={ \"concept_id\": concept_id, \"datetime\": \"2022-02-01T00:00:01Z/2024-10-30T23:59:59Z\", \"step\": \"P1D\", \"temporal_mode\": \"point\", \"variable\": \"analysed_sst\", \"backend\": \"xarray\", }, json=geojson.model_dump(exclude_none=True), timeout=None, ) request.raise_for_status() response = request.json()
CPU times: user 42.8 ms, sys: 7.34 ms, total: 50.1 ms\nWall time: 21.6 s\n

The /timeseries/statistics endpoint returns the GeoJSON with statistics for each step in the time series embedded in the properties.

In\u00a0[11]: Copied!
stats = response[\"properties\"][\"statistics\"]\nprint(len(stats))\n\nstats_preview = {timestamp: sst_stats for i, (timestamp, sst_stats) in enumerate(stats.items()) if i < 2}\nprint(json.dumps(stats_preview, indent=2))\n
stats = response[\"properties\"][\"statistics\"] print(len(stats)) stats_preview = {timestamp: sst_stats for i, (timestamp, sst_stats) in enumerate(stats.items()) if i < 2} print(json.dumps(stats_preview, indent=2))
1001\n{\n  \"2022-02-01T00:00:01+00:00\": {\n    \"analysed_sst\": {\n      \"min\": 285.27000000000004,\n      \"max\": 300.34000000000003,\n      \"mean\": 296.3800266967469,\n      \"count\": 2337.9599609375,\n      \"sum\": 692924.6356385816,\n      \"std\": 2.701563618833078,\n      \"median\": 296.83000000000004,\n      \"majority\": 300.16,\n      \"minority\": 285.27000000000004,\n      \"unique\": 819.0,\n      \"histogram\": [\n        [\n          14,\n          31,\n          40,\n          62,\n          88,\n          154,\n          321,\n          853,\n          378,\n          422\n        ],\n        [\n          285.27000000000004,\n          286.77700000000004,\n          288.28400000000005,\n          289.79100000000005,\n          291.29800000000006,\n          292.80500000000006,\n          294.312,\n          295.819,\n          297.326,\n          298.833,\n          300.34000000000003\n        ]\n      ],\n      \"valid_percent\": 68.49,\n      \"masked_pixels\": 1087.0,\n      \"valid_pixels\": 2363.0,\n      \"percentile_2\": 288.46000000000004,\n      \"percentile_98\": 300.20000000000005\n    }\n  },\n  \"2022-02-02T00:00:01+00:00\": {\n    \"analysed_sst\": {\n      \"min\": 285.45000000000005,\n      \"max\": 300.36,\n      \"mean\": 296.3582956145494,\n      \"count\": 2337.9599609375,\n      \"sum\": 692873.8292384959,\n      \"std\": 2.658495800828904,\n      \"median\": 296.79,\n      \"majority\": 296.59000000000003,\n      \"minority\": 285.45000000000005,\n      \"unique\": 827.0,\n      \"histogram\": [\n        [\n          14,\n          27,\n          51,\n          56,\n          90,\n          157,\n          332,\n          899,\n          329,\n          408\n        ],\n        [\n          285.45000000000005,\n          286.94100000000003,\n          288.432,\n          289.92300000000006,\n          291.41400000000004,\n          292.90500000000003,\n          294.396,\n          295.887,\n          297.37800000000004,\n          298.869,\n          300.36\n        ]\n      ],\n      \"valid_percent\": 68.49,\n      \"masked_pixels\": 1087.0,\n      \"valid_pixels\": 2363.0,\n      \"percentile_2\": 288.69000000000005,\n      \"percentile_98\": 300.15000000000003\n    }\n  }\n}\n

The statistics output can be used to generate plots like this:

In\u00a0[12]: Copied!
data = response['properties']['statistics']\n\ndates = []\nmeans = []\nstds = []\n\nfor date_str, values in data.items():\n    dates.append(datetime.fromisoformat(date_str))\n    means.append(values[\"analysed_sst\"][\"mean\"])\n    stds.append(values[\"analysed_sst\"][\"std\"])\n\nplt.figure(figsize=(10, 6))\n\nplt.plot(dates, means, \"b-\", label=\"Mean\")\n\nplt.fill_between(\n    dates, \n    np.array(means) - np.array(stds),\n    np.array(means) + np.array(stds),\n    alpha=0.2,\n    color=\"b\",\n    label=\"Standard Deviation\",\n)\n\nplt.xlabel(\"Date\")\nplt.ylabel(\"Temperature (K)\")\nplt.title(\"Mean sea surface temperature in the Gulf of Mexico\")\nplt.legend()\n\nplt.xticks(rotation=45)\n\nplt.tight_layout()\n\nplt.show()\n
data = response['properties']['statistics'] dates = [] means = [] stds = [] for date_str, values in data.items(): dates.append(datetime.fromisoformat(date_str)) means.append(values[\"analysed_sst\"][\"mean\"]) stds.append(values[\"analysed_sst\"][\"std\"]) plt.figure(figsize=(10, 6)) plt.plot(dates, means, \"b-\", label=\"Mean\") plt.fill_between( dates, np.array(means) - np.array(stds), np.array(means) + np.array(stds), alpha=0.2, color=\"b\", label=\"Standard Deviation\", ) plt.xlabel(\"Date\") plt.ylabel(\"Temperature (K)\") plt.title(\"Mean sea surface temperature in the Gulf of Mexico\") plt.legend() plt.xticks(rotation=45) plt.tight_layout() plt.show() In\u00a0[13]: Copied!
minx, miny, maxx, maxy = -180, -90, 180, 90\nrequest = httpx.get(\n    f\"{titiler_endpoint}/timeseries/WebMercatorQuad/tilejson.json\",\n    params={\n        \"concept_id\": concept_id,\n        \"datetime\": \"2023-11-01T00:00:01Z/2024-10-30T23:59:59Z\",\n        \"step\": \"P1M\",\n        \"temporal_mode\": \"point\",\n        \"variable\": \"sea_ice_fraction\",\n        \"backend\": \"xarray\",\n        \"colormap_name\": \"blues_r\",\n        \"rescale\": [[0, 1]],\n    },\n    timeout=None,\n)\ntilejsons = request.json()\ntilejson_preview = {\n    timestamp: tilejson\n    for i, (timestamp, tilejson) in enumerate(tilejsons.items())\n    if i < 2\n}\nprint(json.dumps(tilejson_preview, indent=2))\n
minx, miny, maxx, maxy = -180, -90, 180, 90 request = httpx.get( f\"{titiler_endpoint}/timeseries/WebMercatorQuad/tilejson.json\", params={ \"concept_id\": concept_id, \"datetime\": \"2023-11-01T00:00:01Z/2024-10-30T23:59:59Z\", \"step\": \"P1M\", \"temporal_mode\": \"point\", \"variable\": \"sea_ice_fraction\", \"backend\": \"xarray\", \"colormap_name\": \"blues_r\", \"rescale\": [[0, 1]], }, timeout=None, ) tilejsons = request.json() tilejson_preview = { timestamp: tilejson for i, (timestamp, tilejson) in enumerate(tilejsons.items()) if i < 2 } print(json.dumps(tilejson_preview, indent=2))
{\n  \"2023-11-01T00:00:01+00:00\": {\n    \"tilejson\": \"2.2.0\",\n    \"version\": \"1.0.0\",\n    \"scheme\": \"xyz\",\n    \"tiles\": [\n      \"https://dev-titiler-cmr.delta-backend.com/tiles/WebMercatorQuad/{z}/{x}/{y}@1x?concept_id=C2036881735-POCLOUD&variable=sea_ice_fraction&backend=xarray&colormap_name=blues_r&rescale=%5B0%2C+1%5D&concept_id=C2036881735-POCLOUD&datetime=2023-11-01T00%3A00%3A01%2B00%3A00\"\n    ],\n    \"minzoom\": 0,\n    \"maxzoom\": 24,\n    \"bounds\": [\n      -180.0,\n      -90.0,\n      180.0,\n      90.0\n    ],\n    \"center\": [\n      0.0,\n      0.0,\n      0\n    ]\n  },\n  \"2023-12-01T00:00:01+00:00\": {\n    \"tilejson\": \"2.2.0\",\n    \"version\": \"1.0.0\",\n    \"scheme\": \"xyz\",\n    \"tiles\": [\n      \"https://dev-titiler-cmr.delta-backend.com/tiles/WebMercatorQuad/{z}/{x}/{y}@1x?concept_id=C2036881735-POCLOUD&variable=sea_ice_fraction&backend=xarray&colormap_name=blues_r&rescale=%5B0%2C+1%5D&concept_id=C2036881735-POCLOUD&datetime=2023-12-01T00%3A00%3A01%2B00%3A00\"\n    ],\n    \"minzoom\": 0,\n    \"maxzoom\": 24,\n    \"bounds\": [\n      -180.0,\n      -90.0,\n      180.0,\n      90.0\n    ],\n    \"center\": [\n      0.0,\n      0.0,\n      0\n    ]\n  }\n}\n
In\u00a0[14]: Copied!
m = Map(location=[0, 0], zoom_start=3, min_zoom=3)\nfor datetime_, tilejson in tilejsons.items():\n    label = datetime.fromisoformat(datetime_).strftime(\"%Y-%m\")\n    TileLayer(\n        tiles=tilejson[\"tiles\"][0],\n        attr=\"GAMSSA SST\",\n        overlay=True,\n        name=label,\n        show=False,\n    ).add_to(m)\nLayerControl(collapsed=False).add_to(m)\nm\n
m = Map(location=[0, 0], zoom_start=3, min_zoom=3) for datetime_, tilejson in tilejsons.items(): label = datetime.fromisoformat(datetime_).strftime(\"%Y-%m\") TileLayer( tiles=tilejson[\"tiles\"][0], attr=\"GAMSSA SST\", overlay=True, name=label, show=False, ).add_to(m) LayerControl(collapsed=False).add_to(m) m Out[14]: Make this Notebook Trusted to load map: File -> Trust Notebook"},{"location":"examples/time_series_example/#time-series-api","title":"time series API\u00b6","text":"

There is a family of /timeseries endpoints in the titiler.cmr API that can be used to generate time-aware responses.

The timeseries extension provides endpoints for requesting results for all points or intervals along a time series. The /timeseries family of endpoints works by converting the provided time series parameters (datetime, step, and temporal_mode) into a set of datetime query parameters for the corresponding lower-level endpoint, running asynchronous requests to that endpoint, then collecting the results and assembling them into a coherent response for the user.

The time series structure is defined by the datetime, step, and temporal_mode parameters.

The temporal_mode parameter controls whether CMR is queried for a particular point in time (temporal_mode=point) or over an entire interval (temporal_mode=interval). In general, it is best to use temporal_mode=point for datasets whose granules overlap completely in space (e.g. daily sea surface temperature predictions), because the /timeseries endpoints will create a mosaic of all assets returned by the query and the first asset to cover a pixel will be used. For datasets that require granules from multiple timestamps to fully cover an AOI, temporal_mode=interval is appropriate. For example, you can get weekly composites of satellite imagery for visualization purposes with step=P1W & temporal_mode=interval.
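
For illustration only, the sketch below (not the actual titiler.cmr implementation) shows how a datetime range, a step, and a temporal_mode could be expanded into the per-step datetime values that are passed to the lower-level endpoints:

from datetime import datetime, timedelta\n\n# hypothetical time series parameters\nstart = datetime.fromisoformat(\"2024-01-01T00:00:00+00:00\")\nend = datetime.fromisoformat(\"2024-01-29T00:00:00+00:00\")\nstep = timedelta(weeks=1)  # step=P1W\n\n# build the timestamps along the series\ntimestamps = []\nt = start\nwhile t <= end:\n    timestamps.append(t)\n    t += step\n\n# temporal_mode=point -> one lower-level request per timestamp\npoint_datetimes = [t.isoformat() for t in timestamps]\n\n# temporal_mode=interval -> one lower-level request per period between consecutive\n# timestamps (here ending one second before the next timestamp)\ninterval_datetimes = [\n    f\"{timestamps[i].isoformat()}/{(timestamps[i + 1] - timedelta(seconds=1)).isoformat()}\"\n    for i in range(len(timestamps) - 1)\n]\n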

"},{"location":"examples/time_series_example/#time-series-parameters","title":"time series parameters\u00b6","text":"

The time series API makes it possible to return results for many points along a timeseries with a single request. The available parameters are:

  • datetime (str): Either a date-time, an interval, or a comma-separated list of date-times or intervals. Date and time expressions adhere to the RFC 3339 format ('2020-06-01T09:00:00Z').
  • step (str): width of individual timesteps expressed as an ISO 8601 duration
  • temporal_mode (str): if \"point\", queries will be made for the individual timestamps along the timeseries. If \"interval\", queries will be made for the periods between each timestamp along the timeseries.

There are several ways to combine the parameters to produce a time series; example parameter values for each option are sketched after the list below.

  1. Exact points in time from a start to an end datetime:
  • provide datetime={start_datetime}/{end_datetime}, step={step_width}, and temporal_mode=point where step_width is something like P1D for daily or P2W for bi-weekly.
  • provide datetime={start_datetime}/{end_datetime}, and temporal_mode=point without step to get a point for every unique timestamp in the granules between start_datetime and end_datetime.
  2. Fixed-width intervals between a start and end datetime:
  • provide datetime={start_datetime}/{end_datetime}, step, and temporal_mode=interval
  3. Specific datetimes
  • provide datetime=2024-10-01T00:00:01Z,2024-10-02T00:00:01Z
  4. Specific datetime intervals
  • provide datetime=2024-10-01T00:00:01Z/2024-10-01T23:59:59Z,2024-10-05T00:00:01Z/2024-10-05T23:59:59Z
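
For example, the query parameters for each of the options above could be assembled like this (a sketch with hypothetical date ranges):

# 1. exact points in time between a start and end datetime, one per week\nweekly_points = {\n    \"datetime\": \"2024-01-01T00:00:00Z/2024-03-01T00:00:00Z\",\n    \"step\": \"P1W\",\n    \"temporal_mode\": \"point\",\n}\n\n# 1b. a point for every unique granule timestamp in the range (no step)\nall_points = {\n    \"datetime\": \"2024-01-01T00:00:00Z/2024-03-01T00:00:00Z\",\n    \"temporal_mode\": \"point\",\n}\n\n# 2. fixed-width (weekly) intervals between a start and end datetime\nweekly_intervals = {\n    \"datetime\": \"2024-01-01T00:00:00Z/2024-03-01T00:00:00Z\",\n    \"step\": \"P1W\",\n    \"temporal_mode\": \"interval\",\n}\n\n# 3. specific datetimes\nspecific_points = {\"datetime\": \"2024-10-01T00:00:01Z,2024-10-02T00:00:01Z\"}\n\n# 4. specific datetime intervals\nspecific_intervals = {\n    \"datetime\": (\n        \"2024-10-01T00:00:01Z/2024-10-01T23:59:59Z,\"\n        \"2024-10-05T00:00:01Z/2024-10-05T23:59:59Z\"\n    )\n}\n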
"},{"location":"examples/time_series_example/#how-to-use-the-timeseries-api-with-titilercmr","title":"How to use the timeseries API with titiler.cmr\u00b6","text":"

The /timeseries endpoints work by interpreting the time series parameters (e.g. datetime and step) and parameterizing a set of lower-level requests to the related endpoint. For example, a request to /timeseries/statistics for a set of four points in time, each one week apart, will fire off four requests to the /statistics endpoint, each with a particular value in the datetime parameter. The results are collected and returned in a coherent format that can be consumed in a table or a chart.

Every /timeseries request in titiler.cmr will require both a concept_id and a set of time series parameters. The GHRSST Level 4 GAMSSA_28km Global Foundation Sea Surface Temperature Analysis v1.0 dataset (GDS2) is a useful dataset for demo purposes because the granule assets are small (~1MB each).

"},{"location":"examples/time_series_example/#time-series-for-all-granules-between-a-startend-datetime","title":"Time series for all granules between a start/end datetime\u00b6","text":"

For some datasets whose granules are regularly spaced in time (e.g. daily), it is useful to be able to quickly request a summary of all points in time between a start and end datetime. You can do that by providing only a start and end datetime in the datetime parameter (without step). The application will query CMR and produce a list of unique datetime values from the results of the granule search. If a granule represents a datetime range, the midpoint between its start and end is used.
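
As a sketch (the date range is hypothetical), the corresponding parameters for any of the /timeseries endpoints, reusing the GAMSSA concept_id from the examples above, could look like:

params = {\n    \"concept_id\": concept_id,\n    # start/end only; with no step, every unique granule timestamp becomes a point\n    \"datetime\": \"2024-10-01T00:00:00Z/2024-10-15T00:00:00Z\",\n    \"temporal_mode\": \"point\",\n    \"variable\": \"analysed_sst\",\n    \"backend\": \"xarray\",\n}\n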

"},{"location":"examples/time_series_example/#weekly-timeseries","title":"Weekly timeseries\u00b6","text":"

Sometimes you might be interested in a report with lower temporal resolution than the maximum available for a dataset. By setting step=\"P1W\" and temporal_mode=\"point\", you can get a weekly series.

"},{"location":"examples/time_series_example/#periodic-timeseries","title":"Periodic timeseries\u00b6","text":"

Some datasets (like satellite imagery) may consist of granules that do not fully cover an arbitrary area of interest. In this case, it is useful to construct a time series from a set of datetime ranges so that granules can be mosaicked to ensure each step has full coverage.

To create a set of non-overlapping week-long datetime ranges, you can modify the query to use temporal_mode=\"interval\" which will create ranges that start on the weekly values returned in the previous query and extend up to the second before the next value in the series.

"},{"location":"examples/time_series_example/#custom-time-series","title":"Custom time series\u00b6","text":"

If you want to specify the exact datetime values for a time series and you either cannot or do not want to use the time series parameters, you can supply a set of comma-separated datetimes and/or datetime ranges to the datetime parameter.
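
For example, the datetime parameter can be built by joining the specific values with commas; single datetimes and ranges can be mixed (a sketch with hypothetical dates):

specific_datetimes = [\n    \"2024-10-01T00:00:01Z\",\n    \"2024-10-05T00:00:01Z/2024-10-05T23:59:59Z\",\n]\nparams = {\"datetime\": \",\".join(specific_datetimes)}\n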

"},{"location":"examples/time_series_example/#example-sea-surface-temperature-gif","title":"Example: sea surface temperature GIF\u00b6","text":"

The /timeseries/bbox endpoint can be used to produce a GIF that shows a visualization of granules over time.

The example below shows biweekly sea surface temperature estimates from the GAMSSA dataset for the period from November 2023 through October 2024.

"},{"location":"examples/time_series_example/#example-hlsl30-gif","title":"Example: HLSL30 GIF\u00b6","text":"

The example below shows a weekly mosaic of imagery from the Harmonized Landsat Sentinel L30 (HLSL30) collection for the period from January to November 2024.

"},{"location":"examples/time_series_example/#example-sea-surface-temperature-statistics","title":"Example: sea surface temperature statistics\u00b6","text":"

The /timeseries/statistics endpoint will produce summary statistics for an AOI for all points along a timeseries.

The example below shows daily sea surface temperature summary statistics for the Gulf of Mexico from the GAMSSA dataset for the period from February 2022 through October 2024.

"},{"location":"examples/time_series_example/#example-time-series-raster-tiles","title":"Example: Time series raster tiles\u00b6","text":"

It could be useful to allow users to select a timestep in an interactive map. You can use the /timeseries/tilejson endpoint for that purpose. The following example shows how you could use it to provide time series capability to an interactive map of sea ice cover.

"},{"location":"examples/xarray_backend_example/","title":"xarray backend: MUR SST","text":"In\u00a0[1]: Copied!
import json\nfrom datetime import datetime, timezone\n\nimport earthaccess\nimport httpx\nimport xarray as xr\nfrom folium import GeoJson, Map, TileLayer\n\n# titiler_endpoint = \"http://localhost:8081\"  # docker network endpoint\ntitiler_endpoint = \"https://dev-titiler-cmr.delta-backend.com\"  # deployed endpoint\n
import json from datetime import datetime, timezone import earthaccess import httpx import xarray as xr from folium import GeoJson, Map, TileLayer # titiler_endpoint = \"http://localhost:8081\" # docker network endpoint titiler_endpoint = \"https://dev-titiler-cmr.delta-backend.com\" # deployed endpoint In\u00a0[2]: Copied!
datasets = earthaccess.search_datasets(doi=\"10.5067/GHGMR-4FJ04\")\nds = datasets[0]\n\nconcept_id = ds[\"meta\"][\"concept-id\"]\nprint(\"Concept-Id: \", concept_id)\n\nprint(\"Abstract: \", ds[\"umm\"][\"Abstract\"])\n
datasets = earthaccess.search_datasets(doi=\"10.5067/GHGMR-4FJ04\") ds = datasets[0] concept_id = ds[\"meta\"][\"concept-id\"] print(\"Concept-Id: \", concept_id) print(\"Abstract: \", ds[\"umm\"][\"Abstract\"])
Concept-Id:  C1996881146-POCLOUD\nAbstract:  A Group for High Resolution Sea Surface Temperature (GHRSST) Level 4 sea surface temperature analysis produced as a retrospective dataset (four day latency) and near-real-time dataset (one day latency) at the JPL Physical Oceanography DAAC using wavelets as basis functions in an optimal interpolation approach on a global 0.01 degree grid. The version 4 Multiscale Ultrahigh Resolution (MUR) L4 analysis is based upon nighttime GHRSST L2P skin and subskin SST observations from several instruments including the NASA Advanced Microwave Scanning Radiometer-EOS (AMSR-E), the JAXA Advanced Microwave Scanning Radiometer 2 on GCOM-W1, the Moderate Resolution Imaging Spectroradiometers (MODIS) on the NASA Aqua and Terra platforms, the US Navy microwave WindSat radiometer, the Advanced Very High Resolution Radiometer (AVHRR) on several NOAA satellites, and in situ SST observations from the NOAA iQuam project. The ice concentration data are from the archives at the EUMETSAT Ocean and Sea Ice Satellite Application Facility (OSI SAF) High Latitude Processing Center and are also used for an improved SST parameterization for the high-latitudes.  The dataset also contains additional variables for some granules including a SST anomaly derived from a MUR climatology and the temporal distance to the nearest IR measurement for each pixel.This dataset is funded by the NASA MEaSUREs program ( http://earthdata.nasa.gov/our-community/community-data-system-programs/measures-projects ), and created by a team led by Dr. Toshio M. Chin from JPL. It adheres to the GHRSST Data Processing Specification (GDS) version 2 format specifications. Use the file global metadata \"history:\" attribute to determine if a granule is near-realtime or retrospective.\n
In\u00a0[3]: Copied!
results = earthaccess.search_data(\n    count=1,\n    concept_id=concept_id,\n    temporal=(\"2024-10-12\", \"2024-10-13\"),\n)\nprint(\"Granules:\")\nprint(results)\nprint()\nprint(\"Example of NetCDF URL: \")\nfor link in results[0].data_links(access=\"external\"):\n    print(link)\n
results = earthaccess.search_data( count=1, concept_id=concept_id, temporal=(\"2024-10-12\", \"2024-10-13\"), ) print(\"Granules:\") print(results) print() print(\"Example of NetCDF URL: \") for link in results[0].data_links(access=\"external\"): print(link)
Granules:\n[Collection: {'Version': '4.1', 'ShortName': 'MUR-JPL-L4-GLOB-v4.1'}\nSpatial coverage: {'HorizontalSpatialDomain': {'Geometry': {'BoundingRectangles': [{'WestBoundingCoordinate': -180, 'SouthBoundingCoordinate': -90, 'EastBoundingCoordinate': 180, 'NorthBoundingCoordinate': 90}]}}}\nTemporal coverage: {'RangeDateTime': {'EndingDateTime': '2024-10-12T21:00:00.000Z', 'BeginningDateTime': '2024-10-11T21:00:00.000Z'}}\nSize(MB): 707.340648651123\nData: ['https://archive.podaac.earthdata.nasa.gov/podaac-ops-cumulus-protected/MUR-JPL-L4-GLOB-v4.1/20241012090000-JPL-L4_GHRSST-SSTfnd-MUR-GLOB-v02.0-fv04.1.nc']]\n\nExample of NetCDF URL: \nhttps://archive.podaac.earthdata.nasa.gov/podaac-ops-cumulus-protected/MUR-JPL-L4-GLOB-v4.1/20241012090000-JPL-L4_GHRSST-SSTfnd-MUR-GLOB-v02.0-fv04.1.nc\n
In\u00a0[4]: Copied!
fs = earthaccess.get_fsspec_https_session()\n\nds = xr.open_dataset(\n    fs.open(results[0].data_links(access=\"external\")[0]),\n    engine=\"h5netcdf\",\n)\nprint(\"Data Variables:\")\nfor var in ds.data_vars:\n    print(str(var))\n\ndisplay(ds)\n
fs = earthaccess.get_fsspec_https_session() ds = xr.open_dataset( fs.open(results[0].data_links(access=\"external\")[0]), engine=\"h5netcdf\", ) print(\"Data Variables:\") for var in ds.data_vars: print(str(var)) display(ds)
Data Variables:\nanalysed_sst\nanalysis_error\nmask\nsea_ice_fraction\ndt_1km_data\nsst_anomaly\n
<xarray.Dataset> Size: 29GB\nDimensions:           (time: 1, lat: 17999, lon: 36000)\nCoordinates:\n  * time              (time) datetime64[ns] 8B 2024-10-12T09:00:00\n  * lat               (lat) float32 72kB -89.99 -89.98 -89.97 ... 89.98 89.99\n  * lon               (lon) float32 144kB -180.0 -180.0 -180.0 ... 180.0 180.0\nData variables:\n    analysed_sst      (time, lat, lon) float64 5GB ...\n    analysis_error    (time, lat, lon) float64 5GB ...\n    mask              (time, lat, lon) float32 3GB ...\n    sea_ice_fraction  (time, lat, lon) float64 5GB ...\n    dt_1km_data       (time, lat, lon) timedelta64[ns] 5GB ...\n    sst_anomaly       (time, lat, lon) float64 5GB ...\nAttributes: (12/47)\n    Conventions:                CF-1.7\n    title:                      Daily MUR SST, Final product\n    summary:                    A merged, multi-sensor L4 Foundation SST anal...\n    references:                 http://podaac.jpl.nasa.gov/Multi-scale_Ultra-...\n    institution:                Jet Propulsion Laboratory\n    history:                    created at nominal 4-day latency; replaced nr...\n    ...                         ...\n    project:                    NASA Making Earth Science Data Records for Us...\n    publisher_name:             GHRSST Project Office\n    publisher_url:              http://www.ghrsst.org\n    publisher_email:            ghrsst-po@nceo.ac.uk\n    processing_level:           L4\n    cdm_data_type:              grid
xarray.Dataset
  • Dimensions:
    • time: 1
    • lat: 17999
    • lon: 36000
  • Coordinates: (3)
    • time(time)datetime64[ns]2024-10-12T09:00:00long_name :reference time of sst fieldstandard_name :timeaxis :Tcomment :Nominal time of analyzed fields
      array(['2024-10-12T09:00:00.000000000'], dtype='datetime64[ns]')
    • lat(lat)float32-89.99 -89.98 ... 89.98 89.99long_name :latitudestandard_name :latitudeaxis :Yunits :degrees_northvalid_min :-90.0valid_max :90.0comment :geolocations inherited from the input data without correction
      array([-89.99, -89.98, -89.97, ...,  89.97,  89.98,  89.99], dtype=float32)
    • lon(lon)float32-180.0 -180.0 ... 180.0 180.0long_name :longitudestandard_name :longitudeaxis :Xunits :degrees_eastvalid_min :-180.0valid_max :180.0comment :geolocations inherited from the input data without correction
      array([-179.99, -179.98, -179.97, ...,  179.98,  179.99,  180.  ],\n      dtype=float32)
  • Data variables: (6)
    • analysed_sst(time, lat, lon)float64...long_name :analysed sea surface temperaturestandard_name :sea_surface_foundation_temperatureunits :kelvinvalid_min :-32767valid_max :32767comment :\"Final\" version using Multi-Resolution Variational Analysis (MRVA) method for interpolationsource :MODIS_T-JPL, MODIS_A-JPL, AVHRRMTB_G-NAVO, iQUAM-NOAA/NESDIS, Ice_Conc-OSISAF
      [647964000 values with dtype=float64]
    • analysis_error(time, lat, lon)float64...long_name :estimated error standard deviation of analysed_sstunits :kelvinvalid_min :0valid_max :32767comment :uncertainty in \"analysed_sst\"
      [647964000 values with dtype=float64]
    • mask(time, lat, lon)float32...long_name :sea/land field composite maskvalid_min :1valid_max :31flag_masks :[ 1 2 4 8 16]flag_meanings :open_sea land open_lake open_sea_with_ice_in_the_grid open_lake_with_ice_in_the_gridcomment :mask can be used to further filter the data.source :GMT \"grdlandmask\", ice flag from sea_ice_fraction data
      [647964000 values with dtype=float32]
    • sea_ice_fraction(time, lat, lon)float64...long_name :sea ice area fractionstandard_name :sea_ice_area_fractionvalid_min :0valid_max :100source :EUMETSAT OSI-SAF, copyright EUMETSATcomment :ice fraction is a dimensionless quantity between 0 and 1; it has been interpolated by a nearest neighbor approach; EUMETSAT OSI-SAF files used: ice_conc_nh_polstere-100_multi_202410121200.nc, ice_conc_sh_polstere-100_multi_202410121200.nc.
      [647964000 values with dtype=float64]
    • dt_1km_data(time, lat, lon)timedelta64[ns]...long_name :time to most recent 1km datavalid_min :-127valid_max :127source :MODIS and VIIRS pixels ingested by MURcomment :The grid value is hours between the analysis time and the most recent MODIS or VIIRS 1km L2P datum within 0.01 degrees from the grid point. \"Fill value\" indicates absence of such 1km data at the grid point.
      [647964000 values with dtype=timedelta64[ns]]
    • sst_anomaly(time, lat, lon)float64...long_name :SST anomaly from a seasonal SST climatology based on the MUR data over 2003-2014 periodunits :kelvinvalid_min :-32767valid_max :32767comment :anomaly reference to the day-of-year average between 2003 and 2014
      [647964000 values with dtype=float64]
  • Indexes: (3)
    • timePandasIndex
      PandasIndex(DatetimeIndex(['2024-10-12 09:00:00'], dtype='datetime64[ns]', name='time', freq=None))
    • latPandasIndex
      PandasIndex(Index([-89.98999786376953,  -89.9800033569336, -89.97000122070312,\n       -89.95999908447266, -89.94999694824219, -89.94000244140625,\n       -89.93000030517578, -89.91999816894531, -89.91000366210938,\n        -89.9000015258789,\n       ...\n         89.9000015258789,  89.91000366210938,  89.91999816894531,\n        89.93000030517578,  89.94000244140625,  89.94999694824219,\n        89.95999908447266,  89.97000122070312,   89.9800033569336,\n        89.98999786376953],\n      dtype='float32', name='lat', length=17999))
    • lonPandasIndex
      PandasIndex(Index([-179.99000549316406, -179.97999572753906, -179.97000122070312,\n        -179.9600067138672,  -179.9499969482422, -179.94000244140625,\n       -179.92999267578125,  -179.9199981689453, -179.91000366210938,\n       -179.89999389648438,\n       ...\n        179.91000366210938,   179.9199981689453,  179.92999267578125,\n        179.94000244140625,   179.9499969482422,   179.9600067138672,\n        179.97000122070312,  179.97999572753906,  179.99000549316406,\n                     180.0],\n      dtype='float32', name='lon', length=36000))
  • Attributes: (47)Conventions :CF-1.7title :Daily MUR SST, Final productsummary :A merged, multi-sensor L4 Foundation SST analysis product from JPL.references :http://podaac.jpl.nasa.gov/Multi-scale_Ultra-high_Resolution_MUR-SSTinstitution :Jet Propulsion Laboratoryhistory :created at nominal 4-day latency; replaced nrt (1-day latency) version.comment :MUR = \"Multi-scale Ultra-high Resolution\"license :These data are available free of charge under data policy of JPL PO.DAAC.id :MUR-JPL-L4-GLOB-v04.1naming_authority :org.ghrsstproduct_version :04.1uuid :27665bc0-d5fc-11e1-9b23-0800200c9a66gds_version_id :2.0netcdf_version_id :4.1date_created :20241021T071625Zstart_time :20241012T090000Zstop_time :20241012T090000Ztime_coverage_start :20241011T210000Ztime_coverage_end :20241012T210000Zfile_quality_level :3source :MODIS_T-JPL, MODIS_A-JPL, AVHRRMTB_G-NAVO, iQUAM-NOAA/NESDIS, Ice_Conc-OSISAFplatform :Terra, Aqua, MetOp-B, Buoys/Shipssensor :MODIS, AVHRR, in-situMetadata_Conventions :Unidata Observation Dataset v1.0metadata_link :http://podaac.jpl.nasa.gov/ws/metadata/dataset/?format=iso&shortName=MUR-JPL-L4-GLOB-v04.1keywords :Oceans > Ocean Temperature > Sea Surface Temperaturekeywords_vocabulary :NASA Global Change Master Directory (GCMD) Science Keywordsstandard_name_vocabulary :NetCDF Climate and Forecast (CF) Metadata Conventionsouthernmost_latitude :-90.0northernmost_latitude :90.0westernmost_longitude :-180.0easternmost_longitude :180.0spatial_resolution :0.01 degreesgeospatial_lat_units :degrees northgeospatial_lat_resolution :0.01geospatial_lon_units :degrees eastgeospatial_lon_resolution :0.01acknowledgment :Please acknowledge the use of these data with the following statement: These data were provided by JPL under support by NASA MEaSUREs program.creator_name :JPL MUR SST projectcreator_email :ghrsst@podaac.jpl.nasa.govcreator_url :http://mur.jpl.nasa.govproject :NASA Making Earth Science Data Records for Use in Research Environments (MEaSUREs) Programpublisher_name :GHRSST Project Officepublisher_url :http://www.ghrsst.orgpublisher_email :ghrsst-po@nceo.ac.ukprocessing_level :L4cdm_data_type :grid
In\u00a0[5]: Copied!
variable = \"sea_ice_fraction\"\ndatetime_ = datetime(2024, 10, 10, tzinfo=timezone.utc).isoformat()\n
variable = \"sea_ice_fraction\" datetime_ = datetime(2024, 10, 10, tzinfo=timezone.utc).isoformat() In\u00a0[6]: Copied!
r = httpx.get(\n    f\"{titiler_endpoint}/WebMercatorQuad/tilejson.json\",\n    params = (\n        (\"concept_id\", concept_id),\n        # Datetime in form of `start_date/end_date`\n        (\"datetime\", datetime_),\n        # titiler-cmr can work with both Zarr and COG dataset\n        # but we need to tell the endpoints in advance which backend\n        # to use\n        (\"backend\", \"xarray\"),\n        (\"variable\", variable),\n        # We need to set min/max zoom because we don't want to use lowerzoom level (e.g 0)\n        # which will results in useless large scale query\n        (\"minzoom\", 2),\n        (\"maxzoom\", 13),\n        (\"rescale\", \"0,1\"),\n        (\"colormap_name\", \"blues_r\"),\n    )\n).json()\n\nprint(r)\n
r = httpx.get( f\"{titiler_endpoint}/WebMercatorQuad/tilejson.json\", params = ( (\"concept_id\", concept_id), # Datetime in form of `start_date/end_date` (\"datetime\", datetime_), # titiler-cmr can work with both Zarr and COG dataset # but we need to tell the endpoints in advance which backend # to use (\"backend\", \"xarray\"), (\"variable\", variable), # We need to set min/max zoom because we don't want to use lowerzoom level (e.g 0) # which will results in useless large scale query (\"minzoom\", 2), (\"maxzoom\", 13), (\"rescale\", \"0,1\"), (\"colormap_name\", \"blues_r\"), ) ).json() print(r)
{'tilejson': '2.2.0', 'version': '1.0.0', 'scheme': 'xyz', 'tiles': ['https://dev-titiler-cmr.delta-backend.com/tiles/WebMercatorQuad/{z}/{x}/{y}@1x?concept_id=C1996881146-POCLOUD&datetime=2024-10-10T00%3A00%3A00%2B00%3A00&backend=xarray&variable=sea_ice_fraction&rescale=0%2C1&colormap_name=blues_r'], 'minzoom': 2, 'maxzoom': 13, 'bounds': [-180.0, -90.0, 180.0, 90.0], 'center': [0.0, 0.0, 2]}\n
In\u00a0[7]: Copied!
bounds = r[\"bounds\"]\nm = Map(\n    location=(70, -40),\n    zoom_start=3\n)\n\nTileLayer(\n    tiles=r[\"tiles\"][0],\n    opacity=1,\n    attr=\"NASA\",\n).add_to(m)\nm\n
bounds = r[\"bounds\"] m = Map( location=(70, -40), zoom_start=3 ) TileLayer( tiles=r[\"tiles\"][0], opacity=1, attr=\"NASA\", ).add_to(m) m Out[7]: Make this Notebook Trusted to load map: File -> Trust Notebook In\u00a0[8]: Copied!
geojson_dict = {\n  \"type\": \"FeatureCollection\",\n  \"features\": [\n    {\n      \"type\": \"Feature\",\n      \"properties\": {},\n      \"geometry\": {\n        \"coordinates\": [\n          [\n            [\n              -20.79973248834736,\n              83.55979308678764\n            ],\n            [\n              -20.79973248834736,\n              75.0115425216471\n            ],\n            [\n              14.483337068956956,\n              75.0115425216471\n            ],\n            [\n              14.483337068956956,\n              83.55979308678764\n            ],\n            [\n              -20.79973248834736,\n              83.55979308678764\n            ]\n          ]\n        ],\n        \"type\": \"Polygon\"\n      }\n    }\n  ]\n}\n\nr = httpx.post(\n    f\"{titiler_endpoint}/statistics\",\n    params=(\n        (\"concept_id\", concept_id),\n        # Datetime in form of `start_date/end_date`\n        (\"datetime\", datetime_),\n        # titiler-cmr can work with both Zarr and COG dataset\n        # but we need to tell the endpoints in advance which backend\n        # to use\n        (\"backend\", \"xarray\"),\n        (\"variable\", variable),\n    ),\n    json=geojson_dict,\n    timeout=60,\n).json()\n\nprint(json.dumps(r, indent=2))\n
geojson_dict = { \"type\": \"FeatureCollection\", \"features\": [ { \"type\": \"Feature\", \"properties\": {}, \"geometry\": { \"coordinates\": [ [ [ -20.79973248834736, 83.55979308678764 ], [ -20.79973248834736, 75.0115425216471 ], [ 14.483337068956956, 75.0115425216471 ], [ 14.483337068956956, 83.55979308678764 ], [ -20.79973248834736, 83.55979308678764 ] ] ], \"type\": \"Polygon\" } } ] } r = httpx.post( f\"{titiler_endpoint}/statistics\", params=( (\"concept_id\", concept_id), # Datetime in form of `start_date/end_date` (\"datetime\", datetime_), # titiler-cmr can work with both Zarr and COG dataset # but we need to tell the endpoints in advance which backend # to use (\"backend\", \"xarray\"), (\"variable\", variable), ), json=geojson_dict, timeout=60, ).json() print(json.dumps(r, indent=2))
{\n  \"type\": \"FeatureCollection\",\n  \"features\": [\n    {\n      \"type\": \"Feature\",\n      \"geometry\": {\n        \"type\": \"Polygon\",\n        \"coordinates\": [\n          [\n            [\n              -20.79973248834736,\n              83.55979308678764\n            ],\n            [\n              -20.79973248834736,\n              75.0115425216471\n            ],\n            [\n              14.483337068956956,\n              75.0115425216471\n            ],\n            [\n              14.483337068956956,\n              83.55979308678764\n            ],\n            [\n              -20.79973248834736,\n              83.55979308678764\n            ]\n          ]\n        ]\n      },\n      \"properties\": {\n        \"statistics\": {\n          \"sea_ice_fraction\": {\n            \"min\": 0.3,\n            \"max\": 0.99,\n            \"mean\": 0.845157064600111,\n            \"count\": 1725290.875,\n            \"sum\": 1458141.771496357,\n            \"std\": 0.1559272507275522,\n            \"median\": 0.9,\n            \"majority\": 0.9500000000000001,\n            \"minority\": 0.36,\n            \"unique\": 70.0,\n            \"histogram\": [\n              [\n                34892,\n                39574,\n                38696,\n                37867,\n                44348,\n                72817,\n                110580,\n                200188,\n                472678,\n                675707\n              ],\n              [\n                0.3,\n                0.369,\n                0.43799999999999994,\n                0.5069999999999999,\n                0.576,\n                0.645,\n                0.714,\n                0.7829999999999999,\n                0.8519999999999999,\n                0.9209999999999998,\n                0.99\n              ]\n            ],\n            \"valid_percent\": 57.18,\n            \"masked_pixels\": 1293477.0,\n            \"valid_pixels\": 1727347.0,\n            \"percentile_2\": 0.36,\n            \"percentile_98\": 0.99\n          }\n        }\n      }\n    }\n  ]\n}\n
In\u00a0[\u00a0]: Copied!
\n
"},{"location":"examples/xarray_backend_example/#xarray-backend-mur-sst","title":"xarray backend: MUR SST\u00b6","text":"

The MUR SST dataset has daily records for sea surface temperature and ice cover fraction. There is a NetCDF file for each record.

To run the titiler-cmr service locally you can fire up the docker network with this command:

docker compose up\n
"},{"location":"examples/xarray_backend_example/#requirements","title":"Requirements\u00b6","text":"

To run some of the cells in this notebook you will need to install a few packages: earthaccess, folium, httpx, xarray

"},{"location":"examples/xarray_backend_example/#identify-the-dataset","title":"Identify the dataset\u00b6","text":"

You can find the MUR SST dataset using the earthaccess.search_datasets function.

"},{"location":"examples/xarray_backend_example/#examine-a-granule","title":"Examine a granule\u00b6","text":"

Each granule contains a single day record for the entire globe and has a single data file.

"},{"location":"examples/xarray_backend_example/#explore-the-available-variables","title":"Explore the available variables\u00b6","text":"

The NetCDF file can be opened with xarray using the h5netcdf engine. When running outside of the AWS us-west-2 region you will need to access the data using \"external\" HTTPS links (rather than \"direct\" S3 links). Those links require authentication, which is handled by earthaccess as long as you have your Earthdata credentials stored in the ~/.netrc file!

"},{"location":"examples/xarray_backend_example/#define-a-query-for-titiler-cmr","title":"Define a query for titiler-cmr\u00b6","text":"

To use titiler-cmr's endpoints for a NetCDF dataset like this we need to define a date range for the CMR query and a variable to analyze.

"},{"location":"examples/xarray_backend_example/#display-tiles-in-an-interactive-map","title":"Display tiles in an interactive map\u00b6","text":"

The /tilejson.json endpoint will provide a parameterized xyz tile URL that can be added to an interactive map.

"},{"location":"examples/xarray_backend_example/#geojson-statistics","title":"GeoJSON Statistics\u00b6","text":"

The /statistics endpoint can be used to get summary statistics for a geojson Feature or FeatureCollection.

"}]} \ No newline at end of file +{"config":{"lang":["en"],"separator":"[\\s\\-]+","pipeline":["stopWordFilter"]},"docs":[{"location":"","title":"Home","text":"

A modern dynamic tile server with a NASA CMR backend built on top of FastAPI and Rasterio/GDAL.

"},{"location":"#titiler-cmr","title":"titiler-cmr","text":"

An API for creating image tiles from CMR queries.

"},{"location":"#features","title":"Features","text":"
  • Render tiles from assets discovered via queries to NASA's CMR
  • Uses the earthaccess python package to query the CMR
  • Built on top of titiler
  • Multiple projections support (see TileMatrixSets) via morecantile.
  • JPEG / JP2 / PNG / WEBP / GTIFF / NumpyTile output format support
  • Automatic OpenAPI documentation (FastAPI builtin)
  • Example of AWS Lambda / ECS deployment (via CDK)
"},{"location":"#installation","title":"Installation","text":"

To install from source and run for development, install uv, then:

git clone https://github.com/developmentseed/titiler-cmr.git\ncd titiler-cmr\n\nuv sync --all-extras\n
"},{"location":"#authentication-for-data-read-access","title":"Authentication for data read access","text":"

titiler-cmr can read data either over HTTP (external) or directly from AWS S3 (direct) depending on the app configuration. The behavior of the application is controlled by the S3 authentication settings in settings.py, which you can set either with environment variables (TITILER_CMR_S3_AUTH_ACCESS, TITILER_CMR_S3_AUTH_STRATEGY) or in an environment file (.env).

"},{"location":"#direct-from-s3","title":"Direct from S3","text":"

When running in an AWS context (e.g. Lambda), you should configure the application to access the data directly from S3. You can do this in two ways:

  • Configure an AWS IAM role for your runtime environment that has read access to the NASA buckets so that rasterio/GDAL can find the AWS credentials when reading data
  • Set the EARTHDATA_USERNAME and EARTHDATA_PASSWORD environment variables so that the earthaccess package can issue temporary AWS credentials

Note

Direct S3 access configuration will only work if the application is running in the same AWS region as the data are stored!

"},{"location":"#external-access","title":"External access","text":"

When running outside of the AWS context (e.g. locally) you will need to configure the application to access data over HTTP. You can do this by creating an Earthdata account, configuring your .netrc file with your Earthdata login credentials (which GDAL will find when trying to access data over the network), and setting a few environment variables:

# environment variables for GDAL to read data from NASA over HTTP\nexport GDAL_DISABLE_READDIR_ON_OPEN=YES\nexport CPL_VSIL_CURL_USE_HEAD=FALSE\nexport GDAL_HTTP_COOKIEFILE=/tmp/cookies.txt\nexport GDAL_HTTP_COOKIEJAR=/tmp/cookies.txt\nexport EARTHDATA_USERNAME={your earthdata username}\nexport EARTHDATA_PASSWORD={your earthdata password}\n\n# write your .netrc file to the home directory\necho \"machine urs.earthdata.nasa.gov login ${EARTHDATA_USERNAME} password ${EARTHDATA_PASSWORD}\" > ~/.netrc\n

Note

See NASA's docs for details

"},{"location":"#docker-deployment","title":"Docker deployment","text":"

You can run the application in a docker container using the docker-compose.yml file. The docker container is configured to read the EARTHDATA_USERNAME and EARTHDATA_PASSWORD environment variables, so make sure to set those before starting the docker network.

docker compose up --build \n

The application will be available at this address: http://localhost:8081/api.html

"},{"location":"#local-deployment","title":"Local deployment","text":"

To run the application directly in your local environment, configure the application to access data over HTTP then run it using uvicorn:

TITILER_CMR_S3_AUTH_ACCESS=external uvicorn titiler.cmr.main:app --reload\n

The application will be available at this address: http://localhost:8000/api.html

"},{"location":"#contribution-development","title":"Contribution & Development","text":"

See CONTRIBUTING.md

"},{"location":"#license","title":"License","text":"

See LICENSE

"},{"location":"#authors","title":"Authors","text":"

Created by Development Seed

See contributors for a listing of individual contributors.

"},{"location":"#changes","title":"Changes","text":"

See CHANGES.md.

"},{"location":"benchmark_analysis/","title":"Benchmark analysis","text":"In\u00a0[1]: Copied!
\"\"\"Functions for processing and plotting the time series benchmark results\"\"\"\n
\"\"\"Functions for processing and plotting the time series benchmark results\"\"\" Out[1]:
'Functions for processing and plotting the time series benchmark results'
In\u00a0[2]: Copied!
import json\nfrom collections import defaultdict\nfrom typing import Any, Dict, List\n
import json from collections import defaultdict from typing import Any, Dict, List In\u00a0[3]: Copied!
import pandas as pd\nimport plotly.express as px\n
import pandas as pd import plotly.express as px In\u00a0[4]: Copied!
def process_benchmark_data() -> Dict[str, Dict[str, pd.DataFrame]]:\n    \"\"\"Read the benchmark json and convert into a dictionary of dataframes\"\"\"\n    with open(\"../benchmark.json\", \"r\") as f:\n        benchmarks = json.load(f)[\"benchmarks\"]\n\n    records: Dict[str, Dict[str, List[Dict[str, Any]]]] = defaultdict(\n        lambda: defaultdict(list)\n    )\n    for bench in benchmarks:\n        dataset = bench[\"extra_info\"].pop(\"concept_config_id\")\n        record = {\n            \"mean_time\": bench[\"stats\"][\"mean\"],\n            \"stddev\": bench[\"stats\"][\"stddev\"],\n            **bench[\"extra_info\"],\n            **bench[\"params\"],\n        }\n        record.update(bench[\"params\"])\n        records[bench[\"group\"]][dataset].append(record)\n\n    dfs: Dict[str, Dict[str, pd.DataFrame]] = defaultdict(dict)\n    for group, dataset_records in records.items():\n        for dataset, _records in dataset_records.items():\n            df = pd.DataFrame(_records).sort_values(by=\"bbox_size\")\n\n            bbox_dims = sorted(\n                df[\"bbox_dims\"].unique(), key=lambda x: float(x.split(\"x\")[0]) * -1\n            )\n\n            df[\"bbox_dims\"] = pd.Categorical(\n                df[\"bbox_dims\"], categories=bbox_dims, ordered=True\n            )\n\n            dfs[group][dataset] = df\n\n    return dfs\n
def process_benchmark_data() -> Dict[str, Dict[str, pd.DataFrame]]: \"\"\"Read the benchmark json and convert into a dictionary of dataframes\"\"\" with open(\"../benchmark.json\", \"r\") as f: benchmarks = json.load(f)[\"benchmarks\"] records: Dict[str, Dict[str, List[Dict[str, Any]]]] = defaultdict( lambda: defaultdict(list) ) for bench in benchmarks: dataset = bench[\"extra_info\"].pop(\"concept_config_id\") record = { \"mean_time\": bench[\"stats\"][\"mean\"], \"stddev\": bench[\"stats\"][\"stddev\"], **bench[\"extra_info\"], **bench[\"params\"], } record.update(bench[\"params\"]) records[bench[\"group\"]][dataset].append(record) dfs: Dict[str, Dict[str, pd.DataFrame]] = defaultdict(dict) for group, dataset_records in records.items(): for dataset, _records in dataset_records.items(): df = pd.DataFrame(_records).sort_values(by=\"bbox_size\") bbox_dims = sorted( df[\"bbox_dims\"].unique(), key=lambda x: float(x.split(\"x\")[0]) * -1 ) df[\"bbox_dims\"] = pd.Categorical( df[\"bbox_dims\"], categories=bbox_dims, ordered=True ) dfs[group][dataset] = df return dfs In\u00a0[5]: Copied!
def plot_line_with_error_bars(df: pd.DataFrame, **kwargs):\n    \"\"\"Create line plot with vertical error bars\"\"\"\n    fig = px.line(\n        df,\n        x=\"num_timepoints\",\n        y=\"mean_time\",\n        error_y=\"stddev\",\n        labels={\n            \"mean_time\": \"mean response time (seconds)\",\n            \"num_timepoints\": \"number of points in time series\",\n        },\n        **kwargs,\n    )\n\n    return fig\n
def plot_line_with_error_bars(df: pd.DataFrame, **kwargs): \"\"\"Create line plot with vertical error bars\"\"\" fig = px.line( df, x=\"num_timepoints\", y=\"mean_time\", error_y=\"stddev\", labels={ \"mean_time\": \"mean response time (seconds)\", \"num_timepoints\": \"number of points in time series\", }, **kwargs, ) return fig In\u00a0[6]: Copied!
def plot_error_rate_heatmap(\n    df: pd.DataFrame,\n    x: str,\n    y: str,\n    z: str,\n    labels: Dict[str, str],\n    title: str,\n):\n    \"\"\"Plot error rate as a heatmap\"\"\"\n    # Create the pivot table for heatmap\n    data = df[[x, y, z]].pivot(index=y, columns=x, values=z)\n\n    # Create the faceted heatmap using plotly\n    fig = px.imshow(\n        data,\n        labels=labels,\n        title=title,\n    )\n\n    return fig\n
def plot_error_rate_heatmap( df: pd.DataFrame, x: str, y: str, z: str, labels: Dict[str, str], title: str, ): \"\"\"Plot error rate as a heatmap\"\"\" # Create the pivot table for heatmap data = df[[x, y, z]].pivot(index=y, columns=x, values=z) # Create the faceted heatmap using plotly fig = px.imshow( data, labels=labels, title=title, ) return fig In\u00a0[7]: Copied!
# Load and process the data\ndfs = process_benchmark_data()\n
# Load and process the data dfs = process_benchmark_data()
\n---------------------------------------------------------------------------\nFileNotFoundError                         Traceback (most recent call last)\nCell In[7], line 2\n      1 # Load and process the data\n----> 2 dfs = process_benchmark_data()\n\nCell In[4], line 3, in process_benchmark_data()\n      1 def process_benchmark_data() -> Dict[str, Dict[str, pd.DataFrame]]:\n      2     \"\"\"Read the benchmark json and convert into a dictionary of dataframes\"\"\"\n----> 3     with open(\"../benchmark.json\", \"r\") as f:\n      4         benchmarks = json.load(f)[\"benchmarks\"]\n      6     records: Dict[str, Dict[str, List[Dict[str, Any]]]] = defaultdict(\n      7         lambda: defaultdict(list)\n      8     )\n\nFile ~/work/titiler-cmr/titiler-cmr/.venv/lib/python3.10/site-packages/IPython/core/interactiveshell.py:324, in _modified_open(file, *args, **kwargs)\n    317 if file in {0, 1, 2}:\n    318     raise ValueError(\n    319         f\"IPython won't let you open fd={file} by default \"\n    320         \"as it is likely to crash IPython. If you know what you are doing, \"\n    321         \"you can use builtins' open.\"\n    322     )\n--> 324 return io_open(file, *args, **kwargs)\n\nFileNotFoundError: [Errno 2] No such file or directory: '../benchmark.json'
"},{"location":"contributing/","title":"Development - Contributing","text":"

Issues and pull requests are more than welcome: github.com/developmentseed/titiler-cmr/issues

dev install

This project uses uv to manage the python environment and dependencies. To install the package for development you can follow these steps:

# install uv\n\n# unix\ncurl -LsSf https://astral.sh/uv/install.sh | sh\n\n# or windows\n# powershell -c \"irm https://astral.sh/uv/install.ps1 | iex\"\n\ngit clone https://github.com/developmentseed/titiler-cmr.git\ncd titiler-cmr\nuv sync --all-extras\n
"},{"location":"contributing/#linting","title":"Linting","text":"

This repo is set up to use pre-commit to run isort, flake8, pydocstring, black (\"uncompromising Python code formatter\") and mypy when committing new code.

uv run pre-commit install\n
"},{"location":"contributing/#testing","title":"Testing","text":"

You can then run the tests with the following command:

uv run pytest\n

The tests use vcrpy (https://vcrpy.readthedocs.io/en/latest/) to mock API calls with \"pre-recorded\" API responses. When adding new tests that incur actual network traffic, use the @pytest.mark.vcr decorator to indicate that vcrpy should be used. Record the new responses and commit them to the repository.

uv run pytest -v -s --record-mode new_episodes\n
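
A new test that relies on the recorded cassettes might look something like this sketch (the request and assertion are illustrative, not taken from the actual test suite):

import httpx\nimport pytest\n\n\n@pytest.mark.vcr  # record/replay the HTTP traffic made inside this test\ndef test_cmr_collection_search():\n    \"\"\"Example request only; the real tests exercise titiler.cmr endpoints.\"\"\"\n    response = httpx.get(\n        \"https://cmr.earthdata.nasa.gov/search/collections.json\",\n        params={\"concept_id\": \"C1996881146-POCLOUD\"},\n    )\n    assert response.status_code == 200\n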
"},{"location":"contributing/#benchmarks","title":"Benchmarks","text":"

tests/test_timeseries_benchmark.py is used to evaluate the performance and limits of /timeseries requests.

To run the benchmarks:

uv run pytest -vv --benchmark-only --benchmark-json benchmark.json\n
"},{"location":"contributing/#documentation","title":"Documentation","text":"

The documentation is generated using mkdocs and gets built and deployed to GitHub Pages when new tags are released and on pushes to the develop branch.

To preview the documentation in your browser you can run:

uv run mkdocs serve -o\n
"},{"location":"release-notes/","title":"Changelog","text":"

All notable changes to this project will be documented in this file.

The format is based on Keep a Changelog.

"},{"location":"release-notes/#unreleased","title":"Unreleased","text":""},{"location":"release-notes/#added","title":"Added","text":"
  • Initial implementation of STAC metadata structure
"},{"location":"release-notes/#deprecated","title":"Deprecated","text":"
  • Nothing.
"},{"location":"release-notes/#removed","title":"Removed","text":"
  • Nothing.
"},{"location":"release-notes/#fixed","title":"Fixed","text":"
  • Nothing.
"},{"location":"release-notes/#012","title":"0.1.2","text":""},{"location":"release-notes/#added_1","title":"Added","text":"
  • Support for /timeseries endpoints (#33)
"},{"location":"release-notes/#deprecated_1","title":"Deprecated","text":"
  • Nothing.
"},{"location":"release-notes/#removed_1","title":"Removed","text":"
  • Nothing.
"},{"location":"release-notes/#fixed_1","title":"Fixed","text":"
  • Nothing.
"},{"location":"release-notes/#011","title":"0.1.1","text":""},{"location":"release-notes/#added_2","title":"Added","text":"
  • Add /bbox, /feature, and /statistics endpoints (#30)
"},{"location":"release-notes/#deprecated_2","title":"Deprecated","text":"
  • Nothing.
"},{"location":"release-notes/#removed_2","title":"Removed","text":"
  • Nothing.
"},{"location":"release-notes/#fixed_2","title":"Fixed","text":"
  • Nothing.
"},{"location":"release-notes/#011_1","title":"0.1.1","text":""},{"location":"release-notes/#added_3","title":"Added","text":"

-

Ability to run locally with Earthdata authentication (#28)

"},{"location":"release-notes/#deprecated_3","title":"Deprecated","text":"
  • Nothing.
"},{"location":"release-notes/#removed_3","title":"Removed","text":"
  • Nothing.
"},{"location":"release-notes/#fixed_3","title":"Fixed","text":"
  • Nothing.
"},{"location":"time_series_performance_benchmarks/","title":"Time series performance benchmarks","text":"In\u00a0[1]: Copied!
import benchmark_analysis as ba\n
import benchmark_analysis as ba In\u00a0[2]: Copied!
for dataset, df in ba.dfs[\"statistics\"].items():\n    fig = ba.plot_error_rate_heatmap(\n        df=df,\n        x=\"num_timepoints\",\n        y=\"bbox_dims\",\n        z=\"error_rate\",\n        labels={\"x\": \"number of time points\", \"y\": \"bbox dimensions\", \"color\": \"error rate\"},\n        title=f\"{dataset}: error rate by bbox size and number of time points\",\n    )\n    fig.show()\n
for dataset, df in ba.dfs[\"statistics\"].items(): fig = ba.plot_error_rate_heatmap( df=df, x=\"num_timepoints\", y=\"bbox_dims\", z=\"error_rate\", labels={\"x\": \"number of time points\", \"y\": \"bbox dimensions\", \"color\": \"error rate\"}, title=f\"{dataset}: error rate by bbox size and number of time points\", ) fig.show()

In general, the size of the area you want to analyze will have minimal impact on the runtime! This is because titiler.xarray has to read the entire granule into memory before subsetting, so reducing the size of the AOI does not reduce the overall footprint of the computation.

In\u00a0[3]: Copied!
for dataset, df in ba.dfs[\"statistics\"].items():\n    ba.plot_line_with_error_bars(\n        df=df.sort_values([\"bbox_size\", \"num_timepoints\"]),\n        color=\"bbox_dims\",\n        title=f\"{dataset}: statistics runtime\",\n    ).show()\n
for dataset, df in ba.dfs[\"statistics\"].items(): ba.plot_line_with_error_bars( df=df.sort_values([\"bbox_size\", \"num_timepoints\"]), color=\"bbox_dims\", title=f\"{dataset}: statistics runtime\", ).show() In\u00a0[4]: Copied!
for dataset, df in ba.dfs[\"bbox\"].items():\n    for img_size in sorted(df[\"img_size\"].unique()):\n        img_size_df = df[df[\"img_size\"] == img_size]\n        img_dims = img_size_df[\"img_dims\"].unique()[0]\n        ba.plot_error_rate_heatmap(\n            df=img_size_df,\n            x=\"num_timepoints\",\n            y=\"bbox_dims\",\n            z=\"error_rate\",\n            labels={\"x\": \"number of time points\", \"y\": \"bbox dimensions\", \"color\": \"error rate\"},\n            title=f\"{dataset}: image size {img_dims}\",\n        ).show()\n
for dataset, df in ba.dfs[\"bbox\"].items(): for img_size in sorted(df[\"img_size\"].unique()): img_size_df = df[df[\"img_size\"] == img_size] img_dims = img_size_df[\"img_dims\"].unique()[0] ba.plot_error_rate_heatmap( df=img_size_df, x=\"num_timepoints\", y=\"bbox_dims\", z=\"error_rate\", labels={\"x\": \"number of time points\", \"y\": \"bbox dimensions\", \"color\": \"error rate\"}, title=f\"{dataset}: image size {img_dims}\", ).show()

Increasing the size of the area of interest increases the response time, especially for requests for higher-resolution images.

In\u00a0[5]: Copied!
for dataset, df in ba.dfs[\"bbox\"].items():\n    ba.plot_line_with_error_bars(\n        df=df.sort_values([\"bbox_size\", \"num_timepoints\"]),\n        color=\"bbox_dims\",\n        facet_row=\"img_dims\",\n        title=f\"{dataset}: runtime by bbox size and image dimensions\"\n    ).show()\n
for dataset, df in ba.dfs[\"bbox\"].items(): ba.plot_line_with_error_bars( df=df.sort_values([\"bbox_size\", \"num_timepoints\"]), color=\"bbox_dims\", facet_row=\"img_dims\", title=f\"{dataset}: runtime by bbox size and image dimensions\" ).show()"},{"location":"time_series_performance_benchmarks/#time-series-performance-benchmarks","title":"Time series performance benchmarks\u00b6","text":"

The titiler-cmr API is deployed as a Lambda function in the SMCE VEDA AWS account. For small time series requests (<500 time points) you can expect a response from any of the endpoints within ~20 seconds. For larger time series requests, you run the risk of bumping into Lambda concurrency or timeout limits. This report shows some results from the test_timeseries_benchmarks.py script that sends many requests with varying time series lengths as well as several other parameters that affect runtime.

"},{"location":"time_series_performance_benchmarks/#xarray-backend","title":"xarray backend\u00b6","text":"

The following tests use these datasets to evaluate the limits of the /timeseries endpoints for the xarray backend:

  • GAMSSA 28km SST: a daily 0.25 degree (~28 km) resolution dataset with sea surface temperature and sea ice fraction variables
  • MUR SST: a daily 0.01 degree (~1km) resolution dataset with sea surface temperature variables
"},{"location":"time_series_performance_benchmarks/#statistics","title":"statistics\u00b6","text":"

Under the current deployment configuration, the statistics endpoint can process time series requests with up to ~1000 points. Requests that involve more than 1000 points are likely to fail.

"},{"location":"time_series_performance_benchmarks/#bbox-animations","title":"bbox (animations)\u00b6","text":"

Under the current deployment configuration, the bbox endpoint can reliably process time series requests with up to ~500 points. Requests that involve more than 500 points may fail if the area of interest is very large.

"},{"location":"examples/rasterio_backend_example/","title":"rasterio backend example: HLS","text":"In\u00a0[1]: Copied!
import earthaccess\nimport geojson_pydantic\nimport httpx\nimport json\n\n\nfrom folium import GeoJson, Map, TileLayer\n
import earthaccess import geojson_pydantic import httpx import json from folium import GeoJson, Map, TileLayer In\u00a0[2]: Copied!
# titiler_endpoint = \"http://localhost:8081\"  # docker network endpoint\ntitiler_endpoint = \"https://dev-titiler-cmr.delta-backend.com\"  # deployed endpoint\n
# titiler_endpoint = \"http://localhost:8081\" # docker network endpoint titiler_endpoint = \"https://dev-titiler-cmr.delta-backend.com\" # deployed endpoint In\u00a0[3]: Copied!
datasets = earthaccess.search_datasets(doi=\"10.5067/HLS/HLSL30.002\")\nds = datasets[0]\n\nconcept_id = ds[\"meta\"][\"concept-id\"]\nprint(\"Concept-Id: \", concept_id)\nprint(\"Abstract: \", ds[\"umm\"][\"Abstract\"])\n
datasets = earthaccess.search_datasets(doi=\"10.5067/HLS/HLSL30.002\") ds = datasets[0] concept_id = ds[\"meta\"][\"concept-id\"] print(\"Concept-Id: \", concept_id) print(\"Abstract: \", ds[\"umm\"][\"Abstract\"])
Concept-Id:  C2021957657-LPCLOUD\nAbstract:  The Harmonized Landsat Sentinel-2 (HLS) project provides consistent surface reflectance (SR) and top of atmosphere (TOA) brightness data from a virtual constellation of satellite sensors. The Operational Land Imager (OLI) is housed aboard the joint NASA/USGS Landsat 8 and Landsat 9 satellites, while the Multi-Spectral Instrument (MSI) is mounted aboard Europe\u2019s Copernicus Sentinel-2A and Sentinel-2B satellites. The combined measurement enables global observations of the land every 2\u20133 days at 30-meter (m) spatial resolution. The HLS project uses a set of algorithms to obtain seamless products from OLI and MSI that include atmospheric correction, cloud and cloud-shadow masking, spatial co-registration and common gridding, illumination and view angle normalization, and spectral bandpass adjustment.\r\n\r\nThe HLSL30 product provides 30-m Nadir Bidirectional Reflectance Distribution Function (BRDF)-Adjusted Reflectance (NBAR) and is derived from Landsat 8/9 OLI data products. The HLSS30 and HLSL30 products are gridded to the same resolution and Military Grid Reference System (MGRS)(https://hls.gsfc.nasa.gov/products-description/tiling-system/) tiling system, and thus are \u201cstackable\u201d for time series analysis.\r\n\r\nThe HLSL30 product is provided in Cloud Optimized GeoTIFF (COG) format, and each band is distributed as a separate file. There are 11 bands included in the HLSL30 product along with one quality assessment (QA) band and four angle bands. See the User Guide for a more detailed description of the individual bands provided in the HLSL30 product.\n
In\u00a0[4]: Copied!
import earthaccess\nimport morecantile\n\ntms = morecantile.tms.get(\"WebMercatorQuad\")\n\nbounds = tms.bounds(62, 44, 7)\nxmin, ymin, xmax, ymax = (round(n, 8) for n in bounds)\n\nresults = earthaccess.search_data(\n    bounding_box=(xmin, ymin, xmax, ymax),\n    count=1,\n    concept_id=concept_id,\n    temporal=(\"2024-02-11\", \"2024-02-13\"),\n)\nprint(\"Granules:\")\nprint(results)\nprint()\nprint(\"Example of COGs URL: \")\nfor link in results[0].data_links(access=\"direct\"):\n    print(link)\n
import earthaccess import morecantile tms = morecantile.tms.get(\"WebMercatorQuad\") bounds = tms.bounds(62, 44, 7) xmin, ymin, xmax, ymax = (round(n, 8) for n in bounds) results = earthaccess.search_data( bounding_box=(xmin, ymin, xmax, ymax), count=1, concept_id=concept_id, temporal=(\"2024-02-11\", \"2024-02-13\"), ) print(\"Granules:\") print(results) print() print(\"Example of COGs URL: \") for link in results[0].data_links(access=\"direct\"): print(link)
Granules:\n[Collection: {'EntryTitle': 'HLS Landsat Operational Land Imager Surface Reflectance and TOA Brightness Daily Global 30m v2.0'}\nSpatial coverage: {'HorizontalSpatialDomain': {'Geometry': {'GPolygons': [{'Boundary': {'Points': [{'Longitude': -2.64743819, 'Latitude': 48.6644919}, {'Longitude': -2.21521695, 'Latitude': 49.65006328}, {'Longitude': -3.00027708, 'Latitude': 49.65272281}, {'Longitude': -3.00027162, 'Latitude': 48.66503141}, {'Longitude': -2.64743819, 'Latitude': 48.6644919}]}}]}}}\nTemporal coverage: {'RangeDateTime': {'BeginningDateTime': '2024-02-12T11:05:26.302Z', 'EndingDateTime': '2024-02-12T11:05:50.181Z'}}\nSize(MB): 56.62721920013428\nData: ['https://data.lpdaac.earthdatacloud.nasa.gov/lp-prod-protected/HLSL30.020/HLS.L30.T30UWV.2024043T110526.v2.0/HLS.L30.T30UWV.2024043T110526.v2.0.B02.tif', 'https://data.lpdaac.earthdatacloud.nasa.gov/lp-prod-protected/HLSL30.020/HLS.L30.T30UWV.2024043T110526.v2.0/HLS.L30.T30UWV.2024043T110526.v2.0.B06.tif', 'https://data.lpdaac.earthdatacloud.nasa.gov/lp-prod-protected/HLSL30.020/HLS.L30.T30UWV.2024043T110526.v2.0/HLS.L30.T30UWV.2024043T110526.v2.0.B01.tif', 'https://data.lpdaac.earthdatacloud.nasa.gov/lp-prod-protected/HLSL30.020/HLS.L30.T30UWV.2024043T110526.v2.0/HLS.L30.T30UWV.2024043T110526.v2.0.SAA.tif', 'https://data.lpdaac.earthdatacloud.nasa.gov/lp-prod-protected/HLSL30.020/HLS.L30.T30UWV.2024043T110526.v2.0/HLS.L30.T30UWV.2024043T110526.v2.0.B07.tif', 'https://data.lpdaac.earthdatacloud.nasa.gov/lp-prod-protected/HLSL30.020/HLS.L30.T30UWV.2024043T110526.v2.0/HLS.L30.T30UWV.2024043T110526.v2.0.SZA.tif', 'https://data.lpdaac.earthdatacloud.nasa.gov/lp-prod-protected/HLSL30.020/HLS.L30.T30UWV.2024043T110526.v2.0/HLS.L30.T30UWV.2024043T110526.v2.0.B03.tif', 'https://data.lpdaac.earthdatacloud.nasa.gov/lp-prod-protected/HLSL30.020/HLS.L30.T30UWV.2024043T110526.v2.0/HLS.L30.T30UWV.2024043T110526.v2.0.Fmask.tif', 'https://data.lpdaac.earthdatacloud.nasa.gov/lp-prod-protected/HLSL30.020/HLS.L30.T30UWV.2024043T110526.v2.0/HLS.L30.T30UWV.2024043T110526.v2.0.B04.tif', 'https://data.lpdaac.earthdatacloud.nasa.gov/lp-prod-protected/HLSL30.020/HLS.L30.T30UWV.2024043T110526.v2.0/HLS.L30.T30UWV.2024043T110526.v2.0.B05.tif', 'https://data.lpdaac.earthdatacloud.nasa.gov/lp-prod-protected/HLSL30.020/HLS.L30.T30UWV.2024043T110526.v2.0/HLS.L30.T30UWV.2024043T110526.v2.0.VAA.tif', 'https://data.lpdaac.earthdatacloud.nasa.gov/lp-prod-protected/HLSL30.020/HLS.L30.T30UWV.2024043T110526.v2.0/HLS.L30.T30UWV.2024043T110526.v2.0.VZA.tif', 'https://data.lpdaac.earthdatacloud.nasa.gov/lp-prod-protected/HLSL30.020/HLS.L30.T30UWV.2024043T110526.v2.0/HLS.L30.T30UWV.2024043T110526.v2.0.B11.tif', 'https://data.lpdaac.earthdatacloud.nasa.gov/lp-prod-protected/HLSL30.020/HLS.L30.T30UWV.2024043T110526.v2.0/HLS.L30.T30UWV.2024043T110526.v2.0.B10.tif', 'https://data.lpdaac.earthdatacloud.nasa.gov/lp-prod-protected/HLSL30.020/HLS.L30.T30UWV.2024043T110526.v2.0/HLS.L30.T30UWV.2024043T110526.v2.0.B09.tif']]\n\nExample of COGs URL: 
\ns3://lp-prod-protected/HLSL30.020/HLS.L30.T30UWV.2024043T110526.v2.0/HLS.L30.T30UWV.2024043T110526.v2.0.B02.tif\ns3://lp-prod-protected/HLSL30.020/HLS.L30.T30UWV.2024043T110526.v2.0/HLS.L30.T30UWV.2024043T110526.v2.0.B06.tif\ns3://lp-prod-protected/HLSL30.020/HLS.L30.T30UWV.2024043T110526.v2.0/HLS.L30.T30UWV.2024043T110526.v2.0.B01.tif\ns3://lp-prod-protected/HLSL30.020/HLS.L30.T30UWV.2024043T110526.v2.0/HLS.L30.T30UWV.2024043T110526.v2.0.SAA.tif\ns3://lp-prod-protected/HLSL30.020/HLS.L30.T30UWV.2024043T110526.v2.0/HLS.L30.T30UWV.2024043T110526.v2.0.B07.tif\ns3://lp-prod-protected/HLSL30.020/HLS.L30.T30UWV.2024043T110526.v2.0/HLS.L30.T30UWV.2024043T110526.v2.0.SZA.tif\ns3://lp-prod-protected/HLSL30.020/HLS.L30.T30UWV.2024043T110526.v2.0/HLS.L30.T30UWV.2024043T110526.v2.0.B03.tif\ns3://lp-prod-protected/HLSL30.020/HLS.L30.T30UWV.2024043T110526.v2.0/HLS.L30.T30UWV.2024043T110526.v2.0.Fmask.tif\ns3://lp-prod-protected/HLSL30.020/HLS.L30.T30UWV.2024043T110526.v2.0/HLS.L30.T30UWV.2024043T110526.v2.0.B04.tif\ns3://lp-prod-protected/HLSL30.020/HLS.L30.T30UWV.2024043T110526.v2.0/HLS.L30.T30UWV.2024043T110526.v2.0.B05.tif\ns3://lp-prod-protected/HLSL30.020/HLS.L30.T30UWV.2024043T110526.v2.0/HLS.L30.T30UWV.2024043T110526.v2.0.VAA.tif\ns3://lp-prod-protected/HLSL30.020/HLS.L30.T30UWV.2024043T110526.v2.0/HLS.L30.T30UWV.2024043T110526.v2.0.VZA.tif\ns3://lp-prod-protected/HLSL30.020/HLS.L30.T30UWV.2024043T110526.v2.0/HLS.L30.T30UWV.2024043T110526.v2.0.B11.tif\ns3://lp-prod-protected/HLSL30.020/HLS.L30.T30UWV.2024043T110526.v2.0/HLS.L30.T30UWV.2024043T110526.v2.0.B10.tif\ns3://lp-prod-protected/HLSL30.020/HLS.L30.T30UWV.2024043T110526.v2.0/HLS.L30.T30UWV.2024043T110526.v2.0.B09.tif\n
In\u00a0[5]: Copied!
from titiler.cmr.backend import CMRBackend\nfrom titiler.cmr.reader import MultiFilesBandsReader\n\nwith CMRBackend(reader=MultiFilesBandsReader) as backend:\n    assets = backend.assets_for_tile(\n        x=62,\n        y=44,\n        z=7,\n        bands_regex=\"B[0-9][0-9]\",\n        concept_id=concept_id,\n        temporal=(\"2024-02-11\", \"2024-02-13\")\n    )\n\nprint(assets[0])\n
from titiler.cmr.backend import CMRBackend from titiler.cmr.reader import MultiFilesBandsReader with CMRBackend(reader=MultiFilesBandsReader) as backend: assets = backend.assets_for_tile( x=62, y=44, z=7, bands_regex=\"B[0-9][0-9]\", concept_id=concept_id, temporal=(\"2024-02-11\", \"2024-02-13\") ) print(assets[0])
{'url': {'B02': 's3://lp-prod-protected/HLSL30.020/HLS.L30.T30UWV.2024043T110526.v2.0/HLS.L30.T30UWV.2024043T110526.v2.0.B02.tif', 'B06': 's3://lp-prod-protected/HLSL30.020/HLS.L30.T30UWV.2024043T110526.v2.0/HLS.L30.T30UWV.2024043T110526.v2.0.B06.tif', 'B01': 's3://lp-prod-protected/HLSL30.020/HLS.L30.T30UWV.2024043T110526.v2.0/HLS.L30.T30UWV.2024043T110526.v2.0.B01.tif', 'B07': 's3://lp-prod-protected/HLSL30.020/HLS.L30.T30UWV.2024043T110526.v2.0/HLS.L30.T30UWV.2024043T110526.v2.0.B07.tif', 'B03': 's3://lp-prod-protected/HLSL30.020/HLS.L30.T30UWV.2024043T110526.v2.0/HLS.L30.T30UWV.2024043T110526.v2.0.B03.tif', 'B04': 's3://lp-prod-protected/HLSL30.020/HLS.L30.T30UWV.2024043T110526.v2.0/HLS.L30.T30UWV.2024043T110526.v2.0.B04.tif', 'B05': 's3://lp-prod-protected/HLSL30.020/HLS.L30.T30UWV.2024043T110526.v2.0/HLS.L30.T30UWV.2024043T110526.v2.0.B05.tif', 'B11': 's3://lp-prod-protected/HLSL30.020/HLS.L30.T30UWV.2024043T110526.v2.0/HLS.L30.T30UWV.2024043T110526.v2.0.B11.tif', 'B10': 's3://lp-prod-protected/HLSL30.020/HLS.L30.T30UWV.2024043T110526.v2.0/HLS.L30.T30UWV.2024043T110526.v2.0.B10.tif', 'B09': 's3://lp-prod-protected/HLSL30.020/HLS.L30.T30UWV.2024043T110526.v2.0/HLS.L30.T30UWV.2024043T110526.v2.0.B09.tif'}, 'provider': 'LPCLOUD'}\n
In\u00a0[6]: Copied!
from IPython.display import IFrame\nIFrame(f\"{titiler_endpoint}/api.html\", 900,500)\n
from IPython.display import IFrame IFrame(f\"{titiler_endpoint}/api.html\", 900,500) Out[6]: In\u00a0[7]: Copied!
r = httpx.get(\n    f\"{titiler_endpoint}/WebMercatorQuad/tilejson.json\",\n    params = (\n        (\"concept_id\", concept_id),\n        # Datetime in form of `start_date/end_date`\n        (\"datetime\", \"2024-10-01T00:00:00Z/2024-10-10T23:59:59Z\"),\n        # We know that the HLS collection dataset is stored as File per Band\n        # so we need to pass a `band_regex` option to assign `bands` to each URL\n        (\"bands_regex\", \"B[0-9][0-9]\"),\n        # titiler-cmr can work with both Zarr and COG dataset\n        # but we need to tell the endpoints in advance which backend\n        # to use\n        (\"backend\", \"rasterio\"),\n        # True Color Image B04,B03,B02\n        (\"bands\", \"B04\"),\n        (\"bands\", \"B03\"),\n        (\"bands\", \"B02\"),\n        # The data is in type of Uint16 so we need to apply some\n        # rescaling/color_formula in order to create PNGs\n        (\"color_formula\", \"Gamma RGB 3.5 Saturation 1.7 Sigmoidal RGB 15 0.35\"),\n        # We need to set min/max zoom because we don't want to use lowerzoom level (e.g 0)\n        # which will results in useless large scale query\n        (\"minzoom\", 8),\n        (\"maxzoom\", 13),\n    )\n).json()\n\nprint(r)\n
r = httpx.get( f\"{titiler_endpoint}/WebMercatorQuad/tilejson.json\", params = ( (\"concept_id\", concept_id), # Datetime in form of `start_date/end_date` (\"datetime\", \"2024-10-01T00:00:00Z/2024-10-10T23:59:59Z\"), # We know that the HLS collection dataset is stored as File per Band # so we need to pass a `band_regex` option to assign `bands` to each URL (\"bands_regex\", \"B[0-9][0-9]\"), # titiler-cmr can work with both Zarr and COG dataset # but we need to tell the endpoints in advance which backend # to use (\"backend\", \"rasterio\"), # True Color Image B04,B03,B02 (\"bands\", \"B04\"), (\"bands\", \"B03\"), (\"bands\", \"B02\"), # The data is in type of Uint16 so we need to apply some # rescaling/color_formula in order to create PNGs (\"color_formula\", \"Gamma RGB 3.5 Saturation 1.7 Sigmoidal RGB 15 0.35\"), # We need to set min/max zoom because we don't want to use lowerzoom level (e.g 0) # which will results in useless large scale query (\"minzoom\", 8), (\"maxzoom\", 13), ) ).json() print(r)
{'tilejson': '2.2.0', 'version': '1.0.0', 'scheme': 'xyz', 'tiles': ['https://dev-titiler-cmr.delta-backend.com/tiles/WebMercatorQuad/{z}/{x}/{y}@1x?concept_id=C2021957657-LPCLOUD&datetime=2024-10-01T00%3A00%3A00Z%2F2024-10-10T23%3A59%3A59Z&bands_regex=B%5B0-9%5D%5B0-9%5D&backend=rasterio&bands=B04&bands=B03&bands=B02&color_formula=Gamma+RGB+3.5+Saturation+1.7+Sigmoidal+RGB+15+0.35'], 'minzoom': 8, 'maxzoom': 13, 'bounds': [-180.0, -90.0, 180.0, 90.0], 'center': [0.0, 0.0, 8]}\n
In\u00a0[8]: Copied!
bounds = r[\"bounds\"]\nm = Map(\n    location=(47.590266824611675, -91.03729840730689),\n    zoom_start=r[\"maxzoom\"] - 2\n)\n\nTileLayer(\n    tiles=r[\"tiles\"][0],\n    opacity=1,\n    attr=\"NASA\",\n).add_to(m)\nm\n
bounds = r[\"bounds\"] m = Map( location=(47.590266824611675, -91.03729840730689), zoom_start=r[\"maxzoom\"] - 2 ) TileLayer( tiles=r[\"tiles\"][0], opacity=1, attr=\"NASA\", ).add_to(m) m Out[8]: Make this Notebook Trusted to load map: File -> Trust Notebook In\u00a0[9]: Copied!
r = httpx.get(\n    f\"{titiler_endpoint}/WebMercatorQuad/tilejson.json\",\n    params = (\n        (\"concept_id\", concept_id),\n        # Datetime in form of `start_date/end_date`\n        (\"datetime\", \"2024-06-20T00:00:00Z/2024-06-27T23:59:59Z\"),\n        # We know that the HLS collection dataset is stored as File per Band\n        # so we need to pass a `band_regex` option to assign `bands` to each URL\n        (\"bands_regex\", \"B[0-9][0-9]\"),\n        # titiler-cmr can work with both Zarr and COG dataset\n        # but we need to tell the endpoints in advance which backend\n        # to use\n        (\"backend\", \"rasterio\"),\n        # NDVI\n        (\"expression\", \"(B05-B04)/(B05+B04)\"),\n        # Need red (B04) and nir (B05) for NDVI\n        (\"bands\", \"B05\"),\n        (\"bands\", \"B04\"),\n        # The data is in type of Uint16 so we need to apply some\n        # rescaling/color_formula in order to create PNGs\n        (\"colormap_name\", \"viridis\"),\n        (\"rescale\", \"-1,1\"),\n        # We need to set min/max zoom because we don't want to use lowerzoom level (e.g 0)\n        # which will results in useless large scale query\n        (\"minzoom\", 8),\n        (\"maxzoom\", 13),\n    )\n).json()\n\nm = Map(\n    location=(47.9221313337365, -91.65432884883238),\n    zoom_start=r[\"maxzoom\"] - 1\n)\n\n\nTileLayer(\n    tiles=r[\"tiles\"][0],\n    opacity=1,\n    attr=\"NASA\",\n).add_to(m)\n\nm\n
r = httpx.get( f\"{titiler_endpoint}/WebMercatorQuad/tilejson.json\", params = ( (\"concept_id\", concept_id), # Datetime in form of `start_date/end_date` (\"datetime\", \"2024-06-20T00:00:00Z/2024-06-27T23:59:59Z\"), # We know that the HLS collection dataset is stored as File per Band # so we need to pass a `band_regex` option to assign `bands` to each URL (\"bands_regex\", \"B[0-9][0-9]\"), # titiler-cmr can work with both Zarr and COG dataset # but we need to tell the endpoints in advance which backend # to use (\"backend\", \"rasterio\"), # NDVI (\"expression\", \"(B05-B04)/(B05+B04)\"), # Need red (B04) and nir (B05) for NDVI (\"bands\", \"B05\"), (\"bands\", \"B04\"), # The data is in type of Uint16 so we need to apply some # rescaling/color_formula in order to create PNGs (\"colormap_name\", \"viridis\"), (\"rescale\", \"-1,1\"), # We need to set min/max zoom because we don't want to use lowerzoom level (e.g 0) # which will results in useless large scale query (\"minzoom\", 8), (\"maxzoom\", 13), ) ).json() m = Map( location=(47.9221313337365, -91.65432884883238), zoom_start=r[\"maxzoom\"] - 1 ) TileLayer( tiles=r[\"tiles\"][0], opacity=1, attr=\"NASA\", ).add_to(m) m Out[9]: Make this Notebook Trusted to load map: File -> Trust Notebook In\u00a0[10]: Copied!
geojson = {\n  \"type\": \"FeatureCollection\",\n  \"features\": [\n    {\n      \"type\": \"Feature\",\n      \"properties\": {},\n      \"geometry\": {\n        \"coordinates\": [\n          [\n            [\n              -91.65432884883238,\n              47.9221313337365\n            ],\n            [\n              -91.65432884883238,\n              47.86503396133904\n            ],\n            [\n              -91.53842043960762,\n              47.86503396133904\n            ],\n            [\n              -91.53842043960762,\n              47.9221313337365\n            ],\n            [\n              -91.65432884883238,\n              47.9221313337365\n            ]\n          ]\n        ],\n        \"type\": \"Polygon\"\n      }\n    }\n  ]\n}\n
geojson = { \"type\": \"FeatureCollection\", \"features\": [ { \"type\": \"Feature\", \"properties\": {}, \"geometry\": { \"coordinates\": [ [ [ -91.65432884883238, 47.9221313337365 ], [ -91.65432884883238, 47.86503396133904 ], [ -91.53842043960762, 47.86503396133904 ], [ -91.53842043960762, 47.9221313337365 ], [ -91.65432884883238, 47.9221313337365 ] ] ], \"type\": \"Polygon\" } } ] } In\u00a0[11]: Copied!
import json\n\nr = httpx.post(\n    f\"{titiler_endpoint}/statistics\",\n    params=(\n        (\"concept_id\", concept_id),\n        # Datetime in form of `start_date/end_date`\n        (\"datetime\", \"2024-07-01T00:00:00Z/2024-07-10T23:59:59Z\"),\n        # We know that the HLS collection dataset is stored as File per Band\n        # so we need to pass a `band_regex` option to assign `bands` to each URL\n        (\"bands_regex\", \"B[0-9][0-9]\"),\n        # titiler-cmr can work with both Zarr and COG dataset\n        # but we need to tell the endpoints in advance which backend\n        # to use\n        (\"backend\", \"rasterio\"),\n        # NDVI\n        (\"expression\", \"(B05-B04)/(B05+B04)\"),\n        # Need red (B04) and nir (B05) for NDVI\n        (\"bands\", \"B05\"),\n        (\"bands\", \"B04\"),\n    ),\n    json=geojson,\n    timeout=30,\n).json()\n\nprint(json.dumps(r, indent=2))\n
import json r = httpx.post( f\"{titiler_endpoint}/statistics\", params=( (\"concept_id\", concept_id), # Datetime in form of `start_date/end_date` (\"datetime\", \"2024-07-01T00:00:00Z/2024-07-10T23:59:59Z\"), # We know that the HLS collection dataset is stored as File per Band # so we need to pass a `band_regex` option to assign `bands` to each URL (\"bands_regex\", \"B[0-9][0-9]\"), # titiler-cmr can work with both Zarr and COG dataset # but we need to tell the endpoints in advance which backend # to use (\"backend\", \"rasterio\"), # NDVI (\"expression\", \"(B05-B04)/(B05+B04)\"), # Need red (B04) and nir (B05) for NDVI (\"bands\", \"B05\"), (\"bands\", \"B04\"), ), json=geojson, timeout=30, ).json() print(json.dumps(r, indent=2))
{\n  \"type\": \"FeatureCollection\",\n  \"features\": [\n    {\n      \"type\": \"Feature\",\n      \"geometry\": {\n        \"type\": \"Polygon\",\n        \"coordinates\": [\n          [\n            [\n              -91.65432884883238,\n              47.9221313337365\n            ],\n            [\n              -91.65432884883238,\n              47.86503396133904\n            ],\n            [\n              -91.53842043960762,\n              47.86503396133904\n            ],\n            [\n              -91.53842043960762,\n              47.9221313337365\n            ],\n            [\n              -91.65432884883238,\n              47.9221313337365\n            ]\n          ]\n        ]\n      },\n      \"properties\": {\n        \"statistics\": {\n          \"(B05-B04)/(B05+B04)\": {\n            \"min\": -75.4,\n            \"max\": 26.6,\n            \"mean\": 0.5238783261952482,\n            \"count\": 57304.8046875,\n            \"sum\": 30020.745162633113,\n            \"std\": 0.6052277569586431,\n            \"median\": 0.6041512231282431,\n            \"majority\": 0.75,\n            \"minority\": -75.4,\n            \"unique\": 47613.0,\n            \"histogram\": [\n              [\n                1,\n                0,\n                2,\n                1,\n                0,\n                0,\n                16,\n                57764,\n                12,\n                2\n              ],\n              [\n                -75.4,\n                -65.2,\n                -55.00000000000001,\n                -44.80000000000001,\n                -34.60000000000001,\n                -24.400000000000006,\n                -14.20000000000001,\n                -4.000000000000014,\n                6.199999999999989,\n                16.39999999999999,\n                26.6\n              ]\n            ],\n            \"valid_percent\": 100.0,\n            \"masked_pixels\": 0.0,\n            \"valid_pixels\": 57798.0,\n            \"percentile_2\": 0.04382638010956595,\n            \"percentile_98\": 0.8685282140779523\n          }\n        }\n      }\n    }\n  ]\n}\n
In\u00a0[\u00a0]: Copied!
\n
"},{"location":"examples/rasterio_backend_example/#rasterio-backend-example-hls","title":"rasterio backend example: HLS\u00b6","text":"

The Harmonized Landsat Sentinel-2 dataset is available in two collections in CMR. This example will use data from the HLSL30.002 (Landsat) dataset.

"},{"location":"examples/rasterio_backend_example/#requirements","title":"Requirements\u00b6","text":"

To run some of the cells in this notebook, you will need to install a few packages:

  • earthaccess
  • folium
  • httpx

!pip install folium httpx earthaccess

"},{"location":"examples/rasterio_backend_example/#identify-the-dataset","title":"Identify the dataset\u00b6","text":"

You can find the HLSL30.002 dataset using the earthaccess.search_datasets function.

"},{"location":"examples/rasterio_backend_example/#examine-a-granule","title":"Examine a granule\u00b6","text":"

Each granule contains the data for a single point in time for an MGRS tile.

"},{"location":"examples/rasterio_backend_example/#demonstrate-assets_for_tile-method","title":"Demonstrate assets_for_tile method\u00b6","text":"

While rendering xyz tile images, titiler-cmr searches for assets using the assets_for_tile method, which converts the xyz tile extent into a bounding box.

"},{"location":"examples/rasterio_backend_example/#titilercmr-api-documentation","title":"titiler.cmr API documentation\u00b6","text":""},{"location":"examples/rasterio_backend_example/#display-tiles-in-an-interactive-map","title":"Display tiles in an interactive map\u00b6","text":"

The /tilejson.json endpoint will provide a parameterized xyz tile URL that can be added to an interactive map.

"},{"location":"examples/rasterio_backend_example/#render-ndvi-using-the-expression-parameter","title":"Render NDVI using the expression parameter\u00b6","text":"

The expression parameter can be used to render images from a band math expression that combines the individual bands.

"},{"location":"examples/rasterio_backend_example/#geojson-statistics","title":"GeoJSON Statistics\u00b6","text":"

The /statistics endpoint can be used to get summary statistics for a geojson Feature or FeatureCollection.

"},{"location":"examples/time_series_example/","title":"time series API","text":"In\u00a0[1]: Copied!
from IPython.display import IFrame\n\n# if running titiler-cmr in the docker network\n# titiler_endpoint = \"http://localhost:8081\"\n\n# titiler-cmr-staging deployment\ntitiler_endpoint = \"https://dev-titiler-cmr.delta-backend.com\"\n\nIFrame(f\"{titiler_endpoint}/api.html#Timeseries\", 900, 500)\n
from IPython.display import IFrame # if running titiler-cmr in the docker network # titiler_endpoint = \"http://localhost:8081\" # titiler-cmr-staging deployment titiler_endpoint = \"https://dev-titiler-cmr.delta-backend.com\" IFrame(f\"{titiler_endpoint}/api.html#Timeseries\", 900, 500) Out[1]: In\u00a0[2]: Copied!
import json\nfrom datetime import datetime\n\nimport httpx\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom folium import LayerControl, Map, TileLayer\nfrom geojson_pydantic import Feature, Polygon\nfrom IPython.display import Image, display\n
import json from datetime import datetime import httpx import matplotlib.pyplot as plt import numpy as np from folium import LayerControl, Map, TileLayer from geojson_pydantic import Feature, Polygon from IPython.display import Image, display In\u00a0[3]: Copied!
concept_id = \"C2036881735-POCLOUD\"\n
concept_id = \"C2036881735-POCLOUD\"

The /timeseries GET endpoint is useful for demonstrating how the timeseries family of endpoints constructs sub-requests. It returns the list of titiler.cmr query parameters (datetime and concept_id) that will be used to generate the timeseries results.

In\u00a0[4]: Copied!
response = httpx.get(\n    f\"{titiler_endpoint}/timeseries\",\n    params={\n        \"concept_id\": concept_id,\n        \"datetime\": \"2024-10-01T00:00:01Z/2024-10-05T00:00:01Z\",\n    },\n    timeout=None,\n).json()\n\nprint(json.dumps(response, indent=2))\n
response = httpx.get( f\"{titiler_endpoint}/timeseries\", params={ \"concept_id\": concept_id, \"datetime\": \"2024-10-01T00:00:01Z/2024-10-05T00:00:01Z\", }, timeout=None, ).json() print(json.dumps(response, indent=2))
[\n  {\n    \"concept_id\": \"C2036881735-POCLOUD\",\n    \"datetime\": \"2024-10-01T12:00:00+00:00\"\n  },\n  {\n    \"concept_id\": \"C2036881735-POCLOUD\",\n    \"datetime\": \"2024-10-02T12:00:00+00:00\"\n  },\n  {\n    \"concept_id\": \"C2036881735-POCLOUD\",\n    \"datetime\": \"2024-10-03T12:00:00+00:00\"\n  },\n  {\n    \"concept_id\": \"C2036881735-POCLOUD\",\n    \"datetime\": \"2024-10-04T12:00:00+00:00\"\n  },\n  {\n    \"concept_id\": \"C2036881735-POCLOUD\",\n    \"datetime\": \"2024-10-05T12:00:00+00:00\"\n  }\n]\n
In\u00a0[5]: Copied!
response = httpx.get(\n    f\"{titiler_endpoint}/timeseries\",\n    params={\n        \"concept_id\": concept_id,\n        \"datetime\": \"2024-10-01T00:00:01Z/2024-10-30T00:00:01Z\",\n        \"step\": \"P1W\",\n        \"temporal_mode\": \"point\",\n    }\n).json()\n\nprint(json.dumps(response, indent=2))\n
response = httpx.get( f\"{titiler_endpoint}/timeseries\", params={ \"concept_id\": concept_id, \"datetime\": \"2024-10-01T00:00:01Z/2024-10-30T00:00:01Z\", \"step\": \"P1W\", \"temporal_mode\": \"point\", } ).json() print(json.dumps(response, indent=2))
[\n  {\n    \"concept_id\": \"C2036881735-POCLOUD\",\n    \"datetime\": \"2024-10-01T00:00:01+00:00\"\n  },\n  {\n    \"concept_id\": \"C2036881735-POCLOUD\",\n    \"datetime\": \"2024-10-08T00:00:01+00:00\"\n  },\n  {\n    \"concept_id\": \"C2036881735-POCLOUD\",\n    \"datetime\": \"2024-10-15T00:00:01+00:00\"\n  },\n  {\n    \"concept_id\": \"C2036881735-POCLOUD\",\n    \"datetime\": \"2024-10-22T00:00:01+00:00\"\n  },\n  {\n    \"concept_id\": \"C2036881735-POCLOUD\",\n    \"datetime\": \"2024-10-29T00:00:01+00:00\"\n  }\n]\n
In\u00a0[6]: Copied!
response = httpx.get(\n    f\"{titiler_endpoint}/timeseries\",\n    params={\n        \"concept_id\": concept_id,\n        \"datetime\": \"2024-10-01T00:00:01Z/2024-10-30T00:00:01Z\",\n        \"step\": \"P1W\",\n        \"temporal_mode\": \"interval\",\n    }\n).json()\n\nprint(json.dumps(response, indent=2))\n
response = httpx.get( f\"{titiler_endpoint}/timeseries\", params={ \"concept_id\": concept_id, \"datetime\": \"2024-10-01T00:00:01Z/2024-10-30T00:00:01Z\", \"step\": \"P1W\", \"temporal_mode\": \"interval\", } ).json() print(json.dumps(response, indent=2))
[\n  {\n    \"concept_id\": \"C2036881735-POCLOUD\",\n    \"datetime\": \"2024-10-01T00:00:01+00:00/2024-10-08T00:00:00+00:00\"\n  },\n  {\n    \"concept_id\": \"C2036881735-POCLOUD\",\n    \"datetime\": \"2024-10-08T00:00:01+00:00/2024-10-15T00:00:00+00:00\"\n  },\n  {\n    \"concept_id\": \"C2036881735-POCLOUD\",\n    \"datetime\": \"2024-10-15T00:00:01+00:00/2024-10-22T00:00:00+00:00\"\n  },\n  {\n    \"concept_id\": \"C2036881735-POCLOUD\",\n    \"datetime\": \"2024-10-22T00:00:01+00:00/2024-10-29T00:00:00+00:00\"\n  },\n  {\n    \"concept_id\": \"C2036881735-POCLOUD\",\n    \"datetime\": \"2024-10-29T00:00:01+00:00/2024-10-30T00:00:01+00:00\"\n  }\n]\n
In\u00a0[7]: Copied!
response = httpx.get(\n    f\"{titiler_endpoint}/timeseries\",\n    params={\n        \"concept_id\": concept_id,\n        \"datetime\": \",\".join(\n            [\"2024-10-01T00:00:01Z\", \"2024-10-07T00:00:01Z/2024-10-09T23:59:59Z\"]\n        ),\n    }\n).json()\n\nprint(json.dumps(response, indent=2))\n
response = httpx.get( f\"{titiler_endpoint}/timeseries\", params={ \"concept_id\": concept_id, \"datetime\": \",\".join( [\"2024-10-01T00:00:01Z\", \"2024-10-07T00:00:01Z/2024-10-09T23:59:59Z\"] ), } ).json() print(json.dumps(response, indent=2))
[\n  {\n    \"concept_id\": \"C2036881735-POCLOUD\",\n    \"datetime\": \"2024-10-01T00:00:01+00:00\"\n  },\n  {\n    \"concept_id\": \"C2036881735-POCLOUD\",\n    \"datetime\": \"2024-10-07T12:00:00+00:00\"\n  },\n  {\n    \"concept_id\": \"C2036881735-POCLOUD\",\n    \"datetime\": \"2024-10-08T12:00:00+00:00\"\n  },\n  {\n    \"concept_id\": \"C2036881735-POCLOUD\",\n    \"datetime\": \"2024-10-09T12:00:00+00:00\"\n  }\n]\n
In\u00a0[8]: Copied!
minx, miny, maxx, maxy = -180, -90, 180, 90\nrequest = httpx.get(\n    f\"{titiler_endpoint}/timeseries/bbox/{minx},{miny},{maxx},{maxy}.gif\",\n    params={\n        \"concept_id\": concept_id,\n        \"datetime\": \"2023-11-01T00:00:01Z/2024-10-30T23:59:59Z\",\n        \"step\": \"P2W\",\n        \"temporal_mode\": \"point\",\n        \"variable\": \"analysed_sst\",\n        \"backend\": \"xarray\",\n        \"colormap_name\": \"thermal\",\n        \"rescale\": [[273, 315]],\n    },\n    timeout=None,\n)\ndisplay(Image(request.content))\n
minx, miny, maxx, maxy = -180, -90, 180, 90 request = httpx.get( f\"{titiler_endpoint}/timeseries/bbox/{minx},{miny},{maxx},{maxy}.gif\", params={ \"concept_id\": concept_id, \"datetime\": \"2023-11-01T00:00:01Z/2024-10-30T23:59:59Z\", \"step\": \"P2W\", \"temporal_mode\": \"point\", \"variable\": \"analysed_sst\", \"backend\": \"xarray\", \"colormap_name\": \"thermal\", \"rescale\": [[273, 315]], }, timeout=None, ) display(Image(request.content)) In\u00a0[9]: Copied!
minx, miny, maxx, maxy = -91.464,47.353,-90.466,47.974\nrequest = httpx.get(\n    f\"{titiler_endpoint}/timeseries/bbox/{minx},{miny},{maxx},{maxy}/512x512.gif\",\n    params={\n        \"concept_id\": \"C2021957657-LPCLOUD\",\n        \"datetime\": \"2024-01-01T00:00:00Z/2024-11-30T00:00:00Z\",\n        \"step\": \"P1W\",\n        \"temporal_mode\": \"interval\",\n        \"backend\": \"rasterio\",\n        \"bands_regex\":  \"B[0-9][0-9]\",\n        \"bands\": [\"B04\", \"B03\", \"B02\"],\n        \"color_formula\": \"Gamma RGB 3.5 Saturation 1.7 Sigmoidal RGB 15 0.35\",\n        \"fps\": 5,\n    },\n    timeout=None,\n)\ndisplay(Image(request.content))\n
minx, miny, maxx, maxy = -91.464,47.353,-90.466,47.974 request = httpx.get( f\"{titiler_endpoint}/timeseries/bbox/{minx},{miny},{maxx},{maxy}/512x512.gif\", params={ \"concept_id\": \"C2021957657-LPCLOUD\", \"datetime\": \"2024-01-01T00:00:00Z/2024-11-30T00:00:00Z\", \"step\": \"P1W\", \"temporal_mode\": \"interval\", \"backend\": \"rasterio\", \"bands_regex\": \"B[0-9][0-9]\", \"bands\": [\"B04\", \"B03\", \"B02\"], \"color_formula\": \"Gamma RGB 3.5 Saturation 1.7 Sigmoidal RGB 15 0.35\", \"fps\": 5, }, timeout=None, ) display(Image(request.content)) In\u00a0[10]: Copied!
%%time\nminx, miny, maxx, maxy = -98.676, 18.857, -81.623, 31.097\ngeojson = Feature(\n    type=\"Feature\",\n    geometry=Polygon.from_bounds(minx, miny, maxx, maxy),\n    properties={},\n)\nrequest = httpx.post(\n    f\"{titiler_endpoint}/timeseries/statistics\",\n    params={\n        \"concept_id\": concept_id,\n        \"datetime\": \"2022-02-01T00:00:01Z/2024-10-30T23:59:59Z\",\n        \"step\": \"P1D\",\n        \"temporal_mode\": \"point\",\n        \"variable\": \"analysed_sst\",\n        \"backend\": \"xarray\",\n    },\n    json=geojson.model_dump(exclude_none=True),\n    timeout=None,\n)\nrequest.raise_for_status()\nresponse = request.json()\n
%%time minx, miny, maxx, maxy = -98.676, 18.857, -81.623, 31.097 geojson = Feature( type=\"Feature\", geometry=Polygon.from_bounds(minx, miny, maxx, maxy), properties={}, ) request = httpx.post( f\"{titiler_endpoint}/timeseries/statistics\", params={ \"concept_id\": concept_id, \"datetime\": \"2022-02-01T00:00:01Z/2024-10-30T23:59:59Z\", \"step\": \"P1D\", \"temporal_mode\": \"point\", \"variable\": \"analysed_sst\", \"backend\": \"xarray\", }, json=geojson.model_dump(exclude_none=True), timeout=None, ) request.raise_for_status() response = request.json()
CPU times: user 48 ms, sys: 690 \u03bcs, total: 48.7 ms\nWall time: 23.1 s\n

The /timeseries/statistics endpoint returns the GeoJSON with statistics for each step in the time series embedded in the properties.

In\u00a0[11]: Copied!
stats = response[\"properties\"][\"statistics\"]\nprint(len(stats))\n\nstats_preview = {timestamp: sst_stats for i, (timestamp, sst_stats) in enumerate(stats.items()) if i < 2}\nprint(json.dumps(stats_preview, indent=2))\n
stats = response[\"properties\"][\"statistics\"] print(len(stats)) stats_preview = {timestamp: sst_stats for i, (timestamp, sst_stats) in enumerate(stats.items()) if i < 2} print(json.dumps(stats_preview, indent=2))
1001\n{\n  \"2022-02-01T00:00:01+00:00\": {\n    \"analysed_sst\": {\n      \"min\": 285.27000000000004,\n      \"max\": 300.34000000000003,\n      \"mean\": 296.3800266967469,\n      \"count\": 2337.9599609375,\n      \"sum\": 692924.6356385816,\n      \"std\": 2.701563618833078,\n      \"median\": 296.83000000000004,\n      \"majority\": 300.16,\n      \"minority\": 285.27000000000004,\n      \"unique\": 819.0,\n      \"histogram\": [\n        [\n          14,\n          31,\n          40,\n          62,\n          88,\n          154,\n          321,\n          853,\n          378,\n          422\n        ],\n        [\n          285.27000000000004,\n          286.77700000000004,\n          288.28400000000005,\n          289.79100000000005,\n          291.29800000000006,\n          292.80500000000006,\n          294.312,\n          295.819,\n          297.326,\n          298.833,\n          300.34000000000003\n        ]\n      ],\n      \"valid_percent\": 68.49,\n      \"masked_pixels\": 1087.0,\n      \"valid_pixels\": 2363.0,\n      \"percentile_2\": 288.46000000000004,\n      \"percentile_98\": 300.20000000000005\n    }\n  },\n  \"2022-02-02T00:00:01+00:00\": {\n    \"analysed_sst\": {\n      \"min\": 285.45000000000005,\n      \"max\": 300.36,\n      \"mean\": 296.3582956145494,\n      \"count\": 2337.9599609375,\n      \"sum\": 692873.8292384959,\n      \"std\": 2.658495800828904,\n      \"median\": 296.79,\n      \"majority\": 296.59000000000003,\n      \"minority\": 285.45000000000005,\n      \"unique\": 827.0,\n      \"histogram\": [\n        [\n          14,\n          27,\n          51,\n          56,\n          90,\n          157,\n          332,\n          899,\n          329,\n          408\n        ],\n        [\n          285.45000000000005,\n          286.94100000000003,\n          288.432,\n          289.92300000000006,\n          291.41400000000004,\n          292.90500000000003,\n          294.396,\n          295.887,\n          297.37800000000004,\n          298.869,\n          300.36\n        ]\n      ],\n      \"valid_percent\": 68.49,\n      \"masked_pixels\": 1087.0,\n      \"valid_pixels\": 2363.0,\n      \"percentile_2\": 288.69000000000005,\n      \"percentile_98\": 300.15000000000003\n    }\n  }\n}\n

The statistics output can be used to generate plots like this:

In\u00a0[12]: Copied!
data = response['properties']['statistics']\n\ndates = []\nmeans = []\nstds = []\n\nfor date_str, values in data.items():\n    dates.append(datetime.fromisoformat(date_str))\n    means.append(values[\"analysed_sst\"][\"mean\"])\n    stds.append(values[\"analysed_sst\"][\"std\"])\n\nplt.figure(figsize=(10, 6))\n\nplt.plot(dates, means, \"b-\", label=\"Mean\")\n\nplt.fill_between(\n    dates, \n    np.array(means) - np.array(stds),\n    np.array(means) + np.array(stds),\n    alpha=0.2,\n    color=\"b\",\n    label=\"Standard Deviation\",\n)\n\nplt.xlabel(\"Date\")\nplt.ylabel(\"Temperature (K)\")\nplt.title(\"Mean sea surface temperature in the Gulf of Mexico\")\nplt.legend()\n\nplt.xticks(rotation=45)\n\nplt.tight_layout()\n\nplt.show()\n
data = response['properties']['statistics'] dates = [] means = [] stds = [] for date_str, values in data.items(): dates.append(datetime.fromisoformat(date_str)) means.append(values[\"analysed_sst\"][\"mean\"]) stds.append(values[\"analysed_sst\"][\"std\"]) plt.figure(figsize=(10, 6)) plt.plot(dates, means, \"b-\", label=\"Mean\") plt.fill_between( dates, np.array(means) - np.array(stds), np.array(means) + np.array(stds), alpha=0.2, color=\"b\", label=\"Standard Deviation\", ) plt.xlabel(\"Date\") plt.ylabel(\"Temperature (K)\") plt.title(\"Mean sea surface temperature in the Gulf of Mexico\") plt.legend() plt.xticks(rotation=45) plt.tight_layout() plt.show() In\u00a0[13]: Copied!
minx, miny, maxx, maxy = -180, -90, 180, 90\nrequest = httpx.get(\n    f\"{titiler_endpoint}/timeseries/WebMercatorQuad/tilejson.json\",\n    params={\n        \"concept_id\": concept_id,\n        \"datetime\": \"2023-11-01T00:00:01Z/2024-10-30T23:59:59Z\",\n        \"step\": \"P1M\",\n        \"temporal_mode\": \"point\",\n        \"variable\": \"sea_ice_fraction\",\n        \"backend\": \"xarray\",\n        \"colormap_name\": \"blues_r\",\n        \"rescale\": [[0, 1]],\n    },\n    timeout=None,\n)\ntilejsons = request.json()\ntilejson_preview = {\n    timestamp: tilejson\n    for i, (timestamp, tilejson) in enumerate(tilejsons.items())\n    if i < 2\n}\nprint(json.dumps(tilejson_preview, indent=2))\n
minx, miny, maxx, maxy = -180, -90, 180, 90 request = httpx.get( f\"{titiler_endpoint}/timeseries/WebMercatorQuad/tilejson.json\", params={ \"concept_id\": concept_id, \"datetime\": \"2023-11-01T00:00:01Z/2024-10-30T23:59:59Z\", \"step\": \"P1M\", \"temporal_mode\": \"point\", \"variable\": \"sea_ice_fraction\", \"backend\": \"xarray\", \"colormap_name\": \"blues_r\", \"rescale\": [[0, 1]], }, timeout=None, ) tilejsons = request.json() tilejson_preview = { timestamp: tilejson for i, (timestamp, tilejson) in enumerate(tilejsons.items()) if i < 2 } print(json.dumps(tilejson_preview, indent=2))
{\n  \"2023-11-01T00:00:01+00:00\": {\n    \"tilejson\": \"2.2.0\",\n    \"version\": \"1.0.0\",\n    \"scheme\": \"xyz\",\n    \"tiles\": [\n      \"https://dev-titiler-cmr.delta-backend.com/tiles/WebMercatorQuad/{z}/{x}/{y}@1x?concept_id=C2036881735-POCLOUD&variable=sea_ice_fraction&backend=xarray&colormap_name=blues_r&rescale=%5B0%2C+1%5D&concept_id=C2036881735-POCLOUD&datetime=2023-11-01T00%3A00%3A01%2B00%3A00\"\n    ],\n    \"minzoom\": 0,\n    \"maxzoom\": 24,\n    \"bounds\": [\n      -180.0,\n      -90.0,\n      180.0,\n      90.0\n    ],\n    \"center\": [\n      0.0,\n      0.0,\n      0\n    ]\n  },\n  \"2023-12-01T00:00:01+00:00\": {\n    \"tilejson\": \"2.2.0\",\n    \"version\": \"1.0.0\",\n    \"scheme\": \"xyz\",\n    \"tiles\": [\n      \"https://dev-titiler-cmr.delta-backend.com/tiles/WebMercatorQuad/{z}/{x}/{y}@1x?concept_id=C2036881735-POCLOUD&variable=sea_ice_fraction&backend=xarray&colormap_name=blues_r&rescale=%5B0%2C+1%5D&concept_id=C2036881735-POCLOUD&datetime=2023-12-01T00%3A00%3A01%2B00%3A00\"\n    ],\n    \"minzoom\": 0,\n    \"maxzoom\": 24,\n    \"bounds\": [\n      -180.0,\n      -90.0,\n      180.0,\n      90.0\n    ],\n    \"center\": [\n      0.0,\n      0.0,\n      0\n    ]\n  }\n}\n
In\u00a0[14]: Copied!
m = Map(location=[0, 0], zoom_start=3, min_zoom=3)\nfor datetime_, tilejson in tilejsons.items():\n    label = datetime.fromisoformat(datetime_).strftime(\"%Y-%m\")\n    TileLayer(\n        tiles=tilejson[\"tiles\"][0],\n        attr=\"GAMSSA SST\",\n        overlay=True,\n        name=label,\n        show=False,\n    ).add_to(m)\nLayerControl(collapsed=False).add_to(m)\nm\n
m = Map(location=[0, 0], zoom_start=3, min_zoom=3) for datetime_, tilejson in tilejsons.items(): label = datetime.fromisoformat(datetime_).strftime(\"%Y-%m\") TileLayer( tiles=tilejson[\"tiles\"][0], attr=\"GAMSSA SST\", overlay=True, name=label, show=False, ).add_to(m) LayerControl(collapsed=False).add_to(m) m Out[14]: Make this Notebook Trusted to load map: File -> Trust Notebook"},{"location":"examples/time_series_example/#time-series-api","title":"time series API\u00b6","text":"

There is a family of /timeseries endpoints in the titiler.cmr API that can be used to generate time-aware responses.

The timeseries extension provides endpoints for requesting results for all points or intervals along a time series. The /timeseries family of endpoints works by converting the provided time series parameters (datetime, step, and temporal_mode) into a set of datetime query parameters for the corresponding lower-level endpoint, running asynchronous requests to that endpoint, then collecting the results and returning them to the user in a coherent format.
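Conceptually, the expansion works something like the sketch below: a start/end datetime and a step are turned into either a list of timestamps (temporal_mode=point) or a list of non-overlapping intervals (temporal_mode=interval), and each entry becomes the datetime parameter of one lower-level request. This is an illustrative approximation of the behavior described in these docs, not titiler.cmr's actual implementation.

from datetime import datetime, timedelta
from typing import List

def expand_datetime(start: str, end: str, step: timedelta, temporal_mode: str) -> List[str]:
    """Approximate how a /timeseries request is split into lower-level
    datetime query parameters (illustrative only)."""
    t0 = datetime.fromisoformat(start.replace("Z", "+00:00"))
    t1 = datetime.fromisoformat(end.replace("Z", "+00:00"))
    marks = []
    t = t0
    while t <= t1:
        marks.append(t)
        t += step
    if temporal_mode == "point":
        return [m.isoformat() for m in marks]
    # interval mode: each range runs from one mark up to one second before the
    # next mark; the final range is capped at the requested end datetime
    ends = marks[1:] + [t1 + timedelta(seconds=1)]
    return [
        f"{a.isoformat()}/{min(b - timedelta(seconds=1), t1).isoformat()}"
        for a, b in zip(marks, ends)
    ]

# Reproduces the weekly interval example from this notebook
print(expand_datetime("2024-10-01T00:00:01Z", "2024-10-30T00:00:01Z", timedelta(weeks=1), "interval"))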

The time series structure is defined by the datetime, step, and temporal_mode parameters.

The temporal_mode parameter controls whether CMR is queried for a particular point in time (temporal_mode=point) or over an entire interval (temporal_mode=interval). In general, it is best to use temporal_mode=point for datasets where granules overlap completely in space (e.g. daily sea surface temperature predictions) because the /timeseries endpoints will create a mosaic of all assets returned by the query and the first asset to cover a pixel will be used. For datasets that require granules from multiple timestamps to fully cover an AOI, temporal_mode=interval is appropriate. For example, you can get weekly composites of satellite imagery for visualization purposes with step=P1W & temporal_mode=interval.

"},{"location":"examples/time_series_example/#time-series-parameters","title":"time series parameters\u00b6","text":"

The time series API makes it possible to return results for many points along a timeseries with a single request. The available parameters are:

  • datetime (str): Either a date-time, an interval, or a comma-separated list of date-times or intervals. Date and time expressions adhere to the RFC 3339 format ('2020-06-01T09:00:00Z').
  • step (str): width of individual timesteps expressed as an ISO 8601 duration
  • temporal_mode (str): if \"point\", queries will be made for the individual timestamps along the timeseries. If \"interval\", queries will be made for the periods between each timestamp along the timeseries.

There are many ways to combine the parameters to produce a time series; a sketch of each combination follows the list below.

  1. Exact points in time from a start to an end datetime:
  • provide datetime={start_datetime}/{end_datetime}, step={step_width}, and temporal_mode=point where step_width is something like P1D for daily or P2W for bi-weekly.
  • provide datetime={start_datetime}/{end_datetime} and temporal_mode=point without step to get a point for every unique timestamp in the granules between start_datetime and end_datetime.
  2. Fixed-width intervals between a start and end datetime:
  • provide datetime={start_datetime}/{end_datetime}, step, and temporal_mode=interval
  3. Specific datetimes:
  • provide datetime=2024-10-01T00:00:01Z,2024-10-02T00:00:01Z
  4. Specific datetime intervals:
  • provide datetime=2024-10-01T00:00:01Z/2024-10-01T23:59:59Z,2024-10-05T00:00:01Z/2024-10-05T23:59:59Z
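As a compact sketch of the combinations above, here are the corresponding query-parameter dictionaries (using the GAMSSA concept_id from these examples); each could be passed as params to a GET request against the /timeseries endpoint.

concept_id = "C2036881735-POCLOUD"  # GAMSSA 28km SST, used throughout these examples

# 1. Exact points from start to end, one per step
points_with_step = {
    "concept_id": concept_id,
    "datetime": "2024-10-01T00:00:01Z/2024-10-30T00:00:01Z",
    "step": "P1W",
    "temporal_mode": "point",
}

# 1. (no step) one point per unique granule timestamp
points_per_granule = {
    "concept_id": concept_id,
    "datetime": "2024-10-01T00:00:01Z/2024-10-05T00:00:01Z",
}

# 2. Fixed-width intervals between start and end
weekly_intervals = {
    "concept_id": concept_id,
    "datetime": "2024-10-01T00:00:01Z/2024-10-30T00:00:01Z",
    "step": "P1W",
    "temporal_mode": "interval",
}

# 3. Specific datetimes
specific_datetimes = {
    "concept_id": concept_id,
    "datetime": "2024-10-01T00:00:01Z,2024-10-02T00:00:01Z",
}

# 4. Specific datetime intervals
specific_intervals = {
    "concept_id": concept_id,
    "datetime": (
        "2024-10-01T00:00:01Z/2024-10-01T23:59:59Z,"
        "2024-10-05T00:00:01Z/2024-10-05T23:59:59Z"
    ),
}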
"},{"location":"examples/time_series_example/#how-to-use-the-timeseries-api-with-titilercmr","title":"How to use the timeseries API with titiler.cmr\u00b6","text":"

The /timeseries endpoints work by interpreting the time series parameters (e.g. datetime and step) and parameterizing a set of lower-level requests to the related endpoint. For example, a request to /timeseries/statistics for a set of four points in time, each one week apart, will fire off four requests to the /statistics endpoint, each with a particular value in the datetime parameter. The results are collected and returned in a coherent format that can be consumed in a table or a chart.

Every /timeseries request in titiler.cmr will require both a concept_id and a set of time series parameters. The GHRSST Level 4 GAMSSA_28km Global Foundation Sea Surface Temperature Analysis v1.0 dataset (GDS2) is a useful dataset for demo purposes because the granule assets are small (~1MB each).

"},{"location":"examples/time_series_example/#time-series-for-all-granules-between-a-startend-datetime","title":"Time series for all granules between a start/end datetime\u00b6","text":"

For some datasets that have granules that are regularly spaced in time (e.g. daily), it is useful to be able to quickly request a summary of all points in time between a start and end datetime. You can do that by providing the datetime parameter as a {start_datetime}/{end_datetime} interval. The application will query CMR and produce a list of unique datetime values from the results of the granule search. If a granule represents a datetime range, the midpoint between the start and end of that range is used.

"},{"location":"examples/time_series_example/#weekly-timeseries","title":"Weekly timeseries\u00b6","text":"

Sometimes you might be interested in a report with lower temporal resolution than the maximum available for a dataset. By setting step=\"P1W\" and temporal_mode=\"point\", you can get a weekly series.

"},{"location":"examples/time_series_example/#periodic-timeseries","title":"Periodic timeseries\u00b6","text":"

Some datasets (like satellite imagery) may consist of granules that do not fully cover an arbitrary area of interest. In this case, it is useful to construct a time series from a set of datetime ranges so that granules can be mosaicked to ensure each step has full coverage.

To create a set of non-overlapping week-long datetime ranges, you can modify the query to use temporal_mode=\"interval\", which will create ranges that start on the weekly values returned in the previous query and extend up to the second before the next value in the series.

"},{"location":"examples/time_series_example/#custom-time-series","title":"Custom time series\u00b6","text":"

If you want to specify the exact datetime values for a time series and you either cannot or do not want to use the time series parameters, you can supply a set of comma-separated datetimes and/or datetime ranges to the datetime parameter.

"},{"location":"examples/time_series_example/#example-sea-surface-temperature-gif","title":"Example: sea surface temperature GIF\u00b6","text":"

The /timeseries/bbox endpoint can be used to produce a GIF that shows a visualization of granules over time.

The example below shows biweekly sea surface temperature estimates from the GAMSSA dataset for the period from November 2023 through October 2024.

"},{"location":"examples/time_series_example/#example-hlsl30-gif","title":"Example: HLSL30 GIF\u00b6","text":"

The example below shows a weekly mosaic of imagery from the Harmonized Landsat Sentinel L30 (HLSL30) collection for the period from January to November 2024.

"},{"location":"examples/time_series_example/#example-sea-surface-temperature-statistics","title":"Example: sea surface temperature statistics\u00b6","text":"

The /timeseries/statistics endpoint will produce summary statistics for an AOI for all points along a timeseries.

The example below shows daily sea surface temperature summary statistics for the Gulf of Mexico from the GAMSSA dataset for the period from February 2022 through October 2024.

"},{"location":"examples/time_series_example/#example-time-series-raster-tiles","title":"Example: Time series raster tiles\u00b6","text":"

It could be useful to allow users to select a timestep in an interactive map. You can use the /timeseries/tilejson endpoint for that purpose. The following example shows how you could use it to provide time series capability to an interactive map of sea ice cover.

"},{"location":"examples/xarray_backend_example/","title":"xarray backend: MUR SST","text":"In\u00a0[1]: Copied!
import json\nfrom datetime import datetime, timezone\n\nimport earthaccess\nimport httpx\nimport xarray as xr\nfrom folium import GeoJson, Map, TileLayer\n\n# titiler_endpoint = \"http://localhost:8081\"  # docker network endpoint\ntitiler_endpoint = \"https://dev-titiler-cmr.delta-backend.com\"  # deployed endpoint\n
import json from datetime import datetime, timezone import earthaccess import httpx import xarray as xr from folium import GeoJson, Map, TileLayer # titiler_endpoint = \"http://localhost:8081\" # docker network endpoint titiler_endpoint = \"https://dev-titiler-cmr.delta-backend.com\" # deployed endpoint In\u00a0[2]: Copied!
datasets = earthaccess.search_datasets(doi=\"10.5067/GHGMR-4FJ04\")\nds = datasets[0]\n\nconcept_id = ds[\"meta\"][\"concept-id\"]\nprint(\"Concept-Id: \", concept_id)\n\nprint(\"Abstract: \", ds[\"umm\"][\"Abstract\"])\n
datasets = earthaccess.search_datasets(doi=\"10.5067/GHGMR-4FJ04\") ds = datasets[0] concept_id = ds[\"meta\"][\"concept-id\"] print(\"Concept-Id: \", concept_id) print(\"Abstract: \", ds[\"umm\"][\"Abstract\"])
Concept-Id:  C1996881146-POCLOUD\nAbstract:  A Group for High Resolution Sea Surface Temperature (GHRSST) Level 4 sea surface temperature analysis produced as a retrospective dataset (four day latency) and near-real-time dataset (one day latency) at the JPL Physical Oceanography DAAC using wavelets as basis functions in an optimal interpolation approach on a global 0.01 degree grid. The version 4 Multiscale Ultrahigh Resolution (MUR) L4 analysis is based upon nighttime GHRSST L2P skin and subskin SST observations from several instruments including the NASA Advanced Microwave Scanning Radiometer-EOS (AMSR-E), the JAXA Advanced Microwave Scanning Radiometer 2 on GCOM-W1, the Moderate Resolution Imaging Spectroradiometers (MODIS) on the NASA Aqua and Terra platforms, the US Navy microwave WindSat radiometer, the Advanced Very High Resolution Radiometer (AVHRR) on several NOAA satellites, and in situ SST observations from the NOAA iQuam project. The ice concentration data are from the archives at the EUMETSAT Ocean and Sea Ice Satellite Application Facility (OSI SAF) High Latitude Processing Center and are also used for an improved SST parameterization for the high-latitudes.  The dataset also contains additional variables for some granules including a SST anomaly derived from a MUR climatology and the temporal distance to the nearest IR measurement for each pixel.This dataset is funded by the NASA MEaSUREs program ( http://earthdata.nasa.gov/our-community/community-data-system-programs/measures-projects ), and created by a team led by Dr. Toshio M. Chin from JPL. It adheres to the GHRSST Data Processing Specification (GDS) version 2 format specifications. Use the file global metadata \"history:\" attribute to determine if a granule is near-realtime or retrospective.\n
In\u00a0[3]: Copied!
results = earthaccess.search_data(\n    count=1,\n    concept_id=concept_id,\n    temporal=(\"2024-10-12\", \"2024-10-13\"),\n)\nprint(\"Granules:\")\nprint(results)\nprint()\nprint(\"Example of NetCDF URL: \")\nfor link in results[0].data_links(access=\"external\"):\n    print(link)\n
results = earthaccess.search_data( count=1, concept_id=concept_id, temporal=(\"2024-10-12\", \"2024-10-13\"), ) print(\"Granules:\") print(results) print() print(\"Example of NetCDF URL: \") for link in results[0].data_links(access=\"external\"): print(link)
Granules:\n[Collection: {'Version': '4.1', 'ShortName': 'MUR-JPL-L4-GLOB-v4.1'}\nSpatial coverage: {'HorizontalSpatialDomain': {'Geometry': {'BoundingRectangles': [{'WestBoundingCoordinate': -180, 'SouthBoundingCoordinate': -90, 'EastBoundingCoordinate': 180, 'NorthBoundingCoordinate': 90}]}}}\nTemporal coverage: {'RangeDateTime': {'EndingDateTime': '2024-10-12T21:00:00.000Z', 'BeginningDateTime': '2024-10-11T21:00:00.000Z'}}\nSize(MB): 707.340648651123\nData: ['https://archive.podaac.earthdata.nasa.gov/podaac-ops-cumulus-protected/MUR-JPL-L4-GLOB-v4.1/20241012090000-JPL-L4_GHRSST-SSTfnd-MUR-GLOB-v02.0-fv04.1.nc']]\n\nExample of NetCDF URL: \nhttps://archive.podaac.earthdata.nasa.gov/podaac-ops-cumulus-protected/MUR-JPL-L4-GLOB-v4.1/20241012090000-JPL-L4_GHRSST-SSTfnd-MUR-GLOB-v02.0-fv04.1.nc\n
In\u00a0[4]: Copied!
fs = earthaccess.get_fsspec_https_session()\n\nds = xr.open_dataset(\n    fs.open(results[0].data_links(access=\"external\")[0]),\n    engine=\"h5netcdf\",\n)\nprint(\"Data Variables:\")\nfor var in ds.data_vars:\n    print(str(var))\n\ndisplay(ds)\n
fs = earthaccess.get_fsspec_https_session() ds = xr.open_dataset( fs.open(results[0].data_links(access=\"external\")[0]), engine=\"h5netcdf\", ) print(\"Data Variables:\") for var in ds.data_vars: print(str(var)) display(ds)
Data Variables:\nanalysed_sst\nanalysis_error\nmask\nsea_ice_fraction\ndt_1km_data\nsst_anomaly\n
<xarray.Dataset> Size: 29GB\nDimensions:           (time: 1, lat: 17999, lon: 36000)\nCoordinates:\n  * time              (time) datetime64[ns] 8B 2024-10-12T09:00:00\n  * lat               (lat) float32 72kB -89.99 -89.98 -89.97 ... 89.98 89.99\n  * lon               (lon) float32 144kB -180.0 -180.0 -180.0 ... 180.0 180.0\nData variables:\n    analysed_sst      (time, lat, lon) float64 5GB ...\n    analysis_error    (time, lat, lon) float64 5GB ...\n    mask              (time, lat, lon) float32 3GB ...\n    sea_ice_fraction  (time, lat, lon) float64 5GB ...\n    dt_1km_data       (time, lat, lon) timedelta64[ns] 5GB ...\n    sst_anomaly       (time, lat, lon) float64 5GB ...\nAttributes: (12/47)\n    Conventions:                CF-1.7\n    title:                      Daily MUR SST, Final product\n    summary:                    A merged, multi-sensor L4 Foundation SST anal...\n    references:                 http://podaac.jpl.nasa.gov/Multi-scale_Ultra-...\n    institution:                Jet Propulsion Laboratory\n    history:                    created at nominal 4-day latency; replaced nr...\n    ...                         ...\n    project:                    NASA Making Earth Science Data Records for Us...\n    publisher_name:             GHRSST Project Office\n    publisher_url:              http://www.ghrsst.org\n    publisher_email:            ghrsst-po@nceo.ac.uk\n    processing_level:           L4\n    cdm_data_type:              grid
In\u00a0[5]: Copied!
variable = \"sea_ice_fraction\"\ndatetime_ = datetime(2024, 10, 10, tzinfo=timezone.utc).isoformat()\n
variable = \"sea_ice_fraction\" datetime_ = datetime(2024, 10, 10, tzinfo=timezone.utc).isoformat() In\u00a0[6]: Copied!
r = httpx.get(\n    f\"{titiler_endpoint}/WebMercatorQuad/tilejson.json\",\n    params = (\n        (\"concept_id\", concept_id),\n        # Datetime in the form `start_date/end_date`\n        (\"datetime\", datetime_),\n        # titiler-cmr can work with both Zarr and COG datasets,\n        # but we need to tell the endpoints in advance which backend\n        # to use\n        (\"backend\", \"xarray\"),\n        (\"variable\", variable),\n        # We need to set min/max zoom because we don't want to use low zoom levels (e.g. 0),\n        # which would result in needlessly large-scale queries\n        (\"minzoom\", 2),\n        (\"maxzoom\", 13),\n        (\"rescale\", \"0,1\"),\n        (\"colormap_name\", \"blues_r\"),\n    )\n).json()\n\nprint(r)\n
r = httpx.get( f\"{titiler_endpoint}/WebMercatorQuad/tilejson.json\", params = ( (\"concept_id\", concept_id), # Datetime in the form `start_date/end_date` (\"datetime\", datetime_), # titiler-cmr can work with both Zarr and COG datasets, # but we need to tell the endpoints in advance which backend # to use (\"backend\", \"xarray\"), (\"variable\", variable), # We need to set min/max zoom because we don't want to use low zoom levels (e.g. 0), # which would result in needlessly large-scale queries (\"minzoom\", 2), (\"maxzoom\", 13), (\"rescale\", \"0,1\"), (\"colormap_name\", \"blues_r\"), ) ).json() print(r)
{'tilejson': '2.2.0', 'version': '1.0.0', 'scheme': 'xyz', 'tiles': ['https://dev-titiler-cmr.delta-backend.com/tiles/WebMercatorQuad/{z}/{x}/{y}@1x?concept_id=C1996881146-POCLOUD&datetime=2024-10-10T00%3A00%3A00%2B00%3A00&backend=xarray&variable=sea_ice_fraction&rescale=0%2C1&colormap_name=blues_r'], 'minzoom': 2, 'maxzoom': 13, 'bounds': [-180.0, -90.0, 180.0, 90.0], 'center': [0.0, 0.0, 2]}\n
In\u00a0[7]: Copied!
bounds = r[\"bounds\"]\nm = Map(\n    location=(70, -40),\n    zoom_start=3\n)\n\nTileLayer(\n    tiles=r[\"tiles\"][0],\n    opacity=1,\n    attr=\"NASA\",\n).add_to(m)\nm\n
bounds = r[\"bounds\"] m = Map( location=(70, -40), zoom_start=3 ) TileLayer( tiles=r[\"tiles\"][0], opacity=1, attr=\"NASA\", ).add_to(m) m Out[7]: Make this Notebook Trusted to load map: File -> Trust Notebook In\u00a0[8]: Copied!
geojson_dict = {\n  \"type\": \"FeatureCollection\",\n  \"features\": [\n    {\n      \"type\": \"Feature\",\n      \"properties\": {},\n      \"geometry\": {\n        \"coordinates\": [\n          [\n            [\n              -20.79973248834736,\n              83.55979308678764\n            ],\n            [\n              -20.79973248834736,\n              75.0115425216471\n            ],\n            [\n              14.483337068956956,\n              75.0115425216471\n            ],\n            [\n              14.483337068956956,\n              83.55979308678764\n            ],\n            [\n              -20.79973248834736,\n              83.55979308678764\n            ]\n          ]\n        ],\n        \"type\": \"Polygon\"\n      }\n    }\n  ]\n}\n\nr = httpx.post(\n    f\"{titiler_endpoint}/statistics\",\n    params=(\n        (\"concept_id\", concept_id),\n        # Datetime in the form `start_date/end_date`\n        (\"datetime\", datetime_),\n        # titiler-cmr can work with both Zarr and COG datasets,\n        # but we need to tell the endpoints in advance which backend\n        # to use\n        (\"backend\", \"xarray\"),\n        (\"variable\", variable),\n    ),\n    json=geojson_dict,\n    timeout=60,\n).json()\n\nprint(json.dumps(r, indent=2))\n
geojson_dict = { \"type\": \"FeatureCollection\", \"features\": [ { \"type\": \"Feature\", \"properties\": {}, \"geometry\": { \"coordinates\": [ [ [ -20.79973248834736, 83.55979308678764 ], [ -20.79973248834736, 75.0115425216471 ], [ 14.483337068956956, 75.0115425216471 ], [ 14.483337068956956, 83.55979308678764 ], [ -20.79973248834736, 83.55979308678764 ] ] ], \"type\": \"Polygon\" } } ] } r = httpx.post( f\"{titiler_endpoint}/statistics\", params=( (\"concept_id\", concept_id), # Datetime in the form `start_date/end_date` (\"datetime\", datetime_), # titiler-cmr can work with both Zarr and COG datasets, # but we need to tell the endpoints in advance which backend # to use (\"backend\", \"xarray\"), (\"variable\", variable), ), json=geojson_dict, timeout=60, ).json() print(json.dumps(r, indent=2))
{\n  \"type\": \"FeatureCollection\",\n  \"features\": [\n    {\n      \"type\": \"Feature\",\n      \"geometry\": {\n        \"type\": \"Polygon\",\n        \"coordinates\": [\n          [\n            [\n              -20.79973248834736,\n              83.55979308678764\n            ],\n            [\n              -20.79973248834736,\n              75.0115425216471\n            ],\n            [\n              14.483337068956956,\n              75.0115425216471\n            ],\n            [\n              14.483337068956956,\n              83.55979308678764\n            ],\n            [\n              -20.79973248834736,\n              83.55979308678764\n            ]\n          ]\n        ]\n      },\n      \"properties\": {\n        \"statistics\": {\n          \"sea_ice_fraction\": {\n            \"min\": 0.3,\n            \"max\": 0.99,\n            \"mean\": 0.845157064600111,\n            \"count\": 1725290.875,\n            \"sum\": 1458141.771496357,\n            \"std\": 0.1559272507275522,\n            \"median\": 0.9,\n            \"majority\": 0.9500000000000001,\n            \"minority\": 0.36,\n            \"unique\": 70.0,\n            \"histogram\": [\n              [\n                34892,\n                39574,\n                38696,\n                37867,\n                44348,\n                72817,\n                110580,\n                200188,\n                472678,\n                675707\n              ],\n              [\n                0.3,\n                0.369,\n                0.43799999999999994,\n                0.5069999999999999,\n                0.576,\n                0.645,\n                0.714,\n                0.7829999999999999,\n                0.8519999999999999,\n                0.9209999999999998,\n                0.99\n              ]\n            ],\n            \"valid_percent\": 57.18,\n            \"masked_pixels\": 1293477.0,\n            \"valid_pixels\": 1727347.0,\n            \"percentile_2\": 0.36,\n            \"percentile_98\": 0.99\n          }\n        }\n      }\n    }\n  ]\n}\n
"},{"location":"examples/xarray_backend_example/#xarray-backend-mur-sst","title":"xarray backend: MUR SST\u00b6","text":"

The MUR SST dataset has daily records for sea surface temperature and ice cover fraction. There is one NetCDF file for each record.

To run the titiler-cmr service locally, you can start the Docker network with this command:

docker compose up\n
"},{"location":"examples/xarray_backend_example/#requirements","title":"Requirements\u00b6","text":"

To run some of the cells in this notebook you will need to install a few packages: earthaccess, folium, httpx, and xarray.
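For example, one way to install them (assuming a standard pip-based environment) is:

pip install earthaccess folium httpx xarray\n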

"},{"location":"examples/xarray_backend_example/#identify-the-dataset","title":"Identify the dataset\u00b6","text":"

You can find the MUR SST dataset using the earthaccess.search_datasets function.
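As a minimal sketch, a search along these lines (the keyword is illustrative) can list matching collections and their CMR concept ids:

import earthaccess\n\n# list collections matching an illustrative keyword and print their CMR concept ids\ndatasets = earthaccess.search_datasets(keyword=\"MUR SST\")\nfor dataset in datasets[:5]:\n    print(dataset.concept_id())\n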

"},{"location":"examples/xarray_backend_example/#examine-a-granule","title":"Examine a granule\u00b6","text":"

Each granule contains a single day's record for the entire globe and has a single data file.
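For example, a minimal sketch of querying CMR for one day's granule (assuming concept_id was captured from the dataset search above) might look like:

import earthaccess\n\n# search for granules from the collection for a single day (concept_id is assumed from the dataset search)\nresults = earthaccess.search_data(\n    concept_id=concept_id,\n    temporal=(\"2024-10-12\", \"2024-10-12\"),\n)\nprint(len(results))\n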

"},{"location":"examples/xarray_backend_example/#explore-the-available-variables","title":"Explore the available variables\u00b6","text":"

The NetCDF file can be opened with xarray using the h5netcdf engine. When running outside of the AWS us-west-2 region, you will need to access the data using \"external\" HTTPS links (rather than \"direct\" S3 links). Those links require authentication, which earthaccess handles as long as your Earthdata credentials are stored in the ~/.netrc file.
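A minimal sketch, assuming results holds granules from an earlier earthaccess.search_data call:

import earthaccess\nimport xarray as xr\n\nearthaccess.login()  # picks up Earthdata credentials, e.g. from ~/.netrc\n\nfiles = earthaccess.open(results)  # file-like objects over the \"external\" HTTPS links\nds = xr.open_dataset(files[0], engine=\"h5netcdf\")\n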

"},{"location":"examples/xarray_backend_example/#define-a-query-for-titiler-cmr","title":"Define a query for titiler-cmr\u00b6","text":"

To use titiler-cmr's endpoints for a NetCDF dataset like this, we need to define a date range for the CMR query and a variable to analyze.

"},{"location":"examples/xarray_backend_example/#display-tiles-in-an-interactive-map","title":"Display tiles in an interactive map\u00b6","text":"

The /tilejson.json endpoint will provide a parameterized XYZ tile URL that can be added to an interactive map.

"},{"location":"examples/xarray_backend_example/#geojson-statistics","title":"GeoJSON Statistics\u00b6","text":"

The /statistics endpoint can be used to get summary statistics for a GeoJSON Feature or FeatureCollection.

"}]} \ No newline at end of file diff --git a/pr-previews/pr-42/sitemap.xml b/pr-previews/pr-42/sitemap.xml index 7790bf3..61584f8 100644 --- a/pr-previews/pr-42/sitemap.xml +++ b/pr-previews/pr-42/sitemap.xml @@ -2,34 +2,34 @@ https://developmentseed.org/titiler-cmr/ - 2024-12-06 + 2024-12-07 https://developmentseed.org/titiler-cmr/benchmark_analysis/ - 2024-12-06 + 2024-12-07 https://developmentseed.org/titiler-cmr/contributing/ - 2024-12-06 + 2024-12-07 https://developmentseed.org/titiler-cmr/release-notes/ - 2024-12-06 + 2024-12-07 https://developmentseed.org/titiler-cmr/time_series_performance_benchmarks/ - 2024-12-06 + 2024-12-07 https://developmentseed.org/titiler-cmr/examples/rasterio_backend_example/ - 2024-12-06 + 2024-12-07 https://developmentseed.org/titiler-cmr/examples/time_series_example/ - 2024-12-06 + 2024-12-07 https://developmentseed.org/titiler-cmr/examples/xarray_backend_example/ - 2024-12-06 + 2024-12-07 \ No newline at end of file diff --git a/pr-previews/pr-42/sitemap.xml.gz b/pr-previews/pr-42/sitemap.xml.gz index 0e0ebae..a37ba7c 100644 Binary files a/pr-previews/pr-42/sitemap.xml.gz and b/pr-previews/pr-42/sitemap.xml.gz differ diff --git a/pr-previews/pr-42/time_series_performance_benchmarks/index.html b/pr-previews/pr-42/time_series_performance_benchmarks/index.html index 053f76b..f2d3bae 100644 --- a/pr-previews/pr-42/time_series_performance_benchmarks/index.html +++ b/pr-previews/pr-42/time_series_performance_benchmarks/index.html @@ -1141,7 +1141,11 @@

Time series performance benchmarks @@ -1175,23 +1179,27 @@

statistics -
ba.plot_error_rate_heatmap(
-    df=ba.dfs["statistics"],
-    x="num_timepoints",
-    y="bbox_dims",
-    z="error_rate",
-    labels={"x": "number of time points", "y": "bbox dimensions", "color": "error rate"},
-    title="error rate by bbox size and number of time points",
-)
+
for dataset, df in ba.dfs["statistics"].items():
+    fig = ba.plot_error_rate_heatmap(
+        df=df,
+        x="num_timepoints",
+        y="bbox_dims",
+        z="error_rate",
+        labels={"x": "number of time points", "y": "bbox dimensions", "color": "error rate"},
+        title=f"{dataset}: error rate by bbox size and number of time points",
+    )
+    fig.show()
 
-
ba.plot_error_rate_heatmap( - df=ba.dfs["statistics"], - x="num_timepoints", - y="bbox_dims", - z="error_rate", - labels={"x": "number of time points", "y": "bbox dimensions", "color": "error rate"}, - title="error rate by bbox size and number of time points", -)
+
for dataset, df in ba.dfs["statistics"].items(): + fig = ba.plot_error_rate_heatmap( + df=df, + x="num_timepoints", + y="bbox_dims", + z="error_rate", + labels={"x": "number of time points", "y": "bbox dimensions", "color": "error rate"}, + title=f"{dataset}: error rate by bbox size and number of time points", + ) + fig.show()
@@ -1228,9 +1236,39 @@

statistics + + + +