# External modules
import hvplot
import hvplot.pandas
import holoviews as hv
import json
import pandas as pd
pd.options.plotting.backend = 'holoviews'
import rioxarray
import warnings
warnings.filterwarnings('ignore')
# Local modules
import sys; sys.path.append('..')
from cog_tile_test import CogTileTest
import helpers.dataframe as dataframe_helpers
import helpers.eodc_hub_role as eodc_hub_role
from xarray_tile_test import XarrayTileTest
Tile Generation Benchmarks across Data Formats
Explanation
In this notebook we compare the performance of tiling CMIP6 data stored as COG, NetCDF, and Zarr. To tile the NetCDF data, we use a kerchunk reference file. It is possible to use the ZarrReader with NetCDF directly, but it can only read one file at a time, which makes it incomparable with the pgSTAC+COG and Zarr methods.
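For context, the snippet below sketches the general pattern for reading NetCDF data through a kerchunk reference with xarray's Zarr engine. It is illustrative only: the reference path and storage options are hypothetical placeholders, and the actual benchmark uses the helper classes imported above.
import xarray as xr

# Hypothetical reference file; the real references are generated in
# ../01-generate-datasets and read via the helper classes in this repo.
reference = "s3://example-bucket/cmip6-kerchunk-reference.json"

ds = xr.open_dataset(
    "reference://",
    engine="zarr",
    backend_kwargs={
        "consolidated": False,
        "storage_options": {
            "fo": reference,                  # the kerchunk reference JSON
            "remote_protocol": "s3",          # protocol of the underlying NetCDF bytes
            "remote_options": {"anon": True}  # assumes a publicly readable bucket
        },
    },
)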
Setup
credentials = eodc_hub_role.fetch_and_set_credentials()
Below we load only the CMIP6 Zarr dataset that has the same chunk structure as the original NetCDF data.
# Run 10 iterations of each setting
iterations = 10
zooms = range(6)

cog_dataset_id, cog_dataset = list(json.loads(open('../01-generate-datasets/cmip6-pgstac/cog-datasets.json').read()).items())[0]
kerchunk_dataset_id, kerchunk_dataset = list(json.loads(open('../01-generate-datasets/cmip6-kerchunk-dataset.json').read()).items())[0]
zarr_datasets = json.loads(open('../01-generate-datasets/cmip6-zarr-datasets.json').read())
filtered_dict = {k: v for k, v in zarr_datasets.items() if '600_1440_1' in k}
zarr_dataset_id, zarr_dataset = list(filtered_dict.items())[0]
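As an optional sanity check, the following prints the dataset identifiers and parameter keys that were just loaded. It assumes only what the code below relies on: each JSON entry maps a dataset ID to a dict of keyword arguments for the tile test classes.
# Quick look at what was loaded (optional)
print(cog_dataset_id, list(cog_dataset.keys()))
print(kerchunk_dataset_id, list(kerchunk_dataset.keys()))
print(zarr_dataset_id, list(zarr_dataset.keys()))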
Run Tests
COG Tests
# Based on our findings in 01-cog-gdal-tests, we run these tests with set_gdal_vars set to True
# (an illustrative sketch of typical GDAL settings follows this cell).
cog_tile_test = CogTileTest(
    dataset_id=cog_dataset_id,
    lat_extent=[-59, 89],
    lon_extent=[-179, 179],
    extra_args={
        'query': cog_dataset['example_query'],
        'set_gdal_vars': True,
        'credentials': credentials
    }
)
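The exact environment variables applied when set_gdal_vars is True live in the local test helpers and are not shown in this notebook. Purely as an illustrative assumption, a typical GDAL configuration for efficient remote COG reads looks like this sketch.
import os

# Illustrative values only; see the local helpers for what set_gdal_vars actually sets.
os.environ.update({
    "GDAL_DISABLE_READDIR_ON_OPEN": "EMPTY_DIR",   # skip listing the remote "directory" on open
    "GDAL_HTTP_MERGE_CONSECUTIVE_RANGES": "YES",   # merge adjacent HTTP range requests
    "CPL_VSIL_CURL_ALLOWED_EXTENSIONS": ".tif",    # limit remote reads to GeoTIFFs
})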
# Run the test `iterations` times for each zoom level
for zoom in zooms:
    cog_tile_test.run_batch({'zoom': zoom}, batch_size=iterations)

cog_results = cog_tile_test.store_results(credentials)
Caught exception: An error occurred (InvalidPermission.Duplicate) when calling the AuthorizeSecurityGroupIngress operation: the specified rule "peer: 35.93.112.139/32, TCP, from port: 5432, to port: 5432, ALLOW" already exists
Connected to database
Wrote instance data to s3://nasa-eodc-data-store/test-results/20230907003859_CogTileTest_CMIP6_daily_GISS-E2-1-G_tas.json
Kerchunk Tests
kerchunk_tile_test = XarrayTileTest(
    dataset_id=kerchunk_dataset_id,
    **kerchunk_dataset
)

# Run many times for each zoom level
for zoom in zooms:
    kerchunk_tile_test.run_batch({'zoom': zoom}, batch_size=iterations)

kerchunk_results = kerchunk_tile_test.store_results(credentials)
Wrote instance data to s3://nasa-eodc-data-store/test-results/20230907003910_XarrayTileTest_cmip6-kerchunk.json
Zarr Tests
zarr_tile_test = XarrayTileTest(
    dataset_id=zarr_dataset_id,
    **zarr_dataset
)

# Run the test `iterations` times for each zoom level
for zoom in zooms:
    zarr_tile_test.run_batch({'zoom': zoom}, batch_size=iterations)

zarr_results = zarr_tile_test.store_results(credentials)
Wrote instance data to s3://nasa-eodc-data-store/test-results/20230907003922_XarrayTileTest_600_1440_1_CMIP6_daily_GISS-E2-1-G_tas.zarr.json
Read and Plot Results
all_urls = [cog_results, zarr_results, kerchunk_results]
all_df = dataframe_helpers.load_all_into_dataframe(credentials, all_urls)
expanded_df = dataframe_helpers.expand_timings(all_df)
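Optionally, it can help to glance at the expanded dataframe before labeling it; the columns referenced below are dataset_id, zoom, and time.
# Peek at the timing records (the 'dataset_id', 'zoom', and 'time' columns
# are used in the labeling and plotting cells that follow).
expanded_df.head()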
expanded_df['data_format'] = 'Unknown'
# Define the conditions
expanded_df.loc[expanded_df['dataset_id'] == cog_dataset_id, 'data_format'] = 'COG'
expanded_df.loc[expanded_df['dataset_id'] == zarr_dataset_id, 'data_format'] = 'Zarr'
expanded_df.loc[expanded_df['dataset_id'] == kerchunk_dataset_id, 'data_format'] = 'kerchunk'
= ["#E1BE6A", "#40B0A6"]
cmap = {"width": 300, "height": 250}
plt_opts
= []
plts
for zoom_level in zooms:
= expanded_df[expanded_df["zoom"] == zoom_level]
df_level
plts.append(
df_level.hvplot.box(="time",
y=["data_format"],
by="data_format",
c=cmap,
cmap="Time to render (ms)",
ylabel="Data Format",
xlabel=False,
legend=f"Zoom level {zoom_level}",
title**plt_opts)
).opts(
)
2) hv.Layout(plts).cols(
'results-csvs/02-cog-kerchunk-zarr-results.csv') expanded_df.to_csv(
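As an optional follow-up sketch, the same dataframe can be summarized numerically, for example the median render time per data format and zoom level, using the columns plotted above.
# Median time per data format and zoom level (a sketch; uses the same
# 'data_format', 'zoom', and 'time' columns as the box plots above).
expanded_df.groupby(['data_format', 'zoom'])['time'].median().unstack('data_format')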