From 978c98dd2a2cd277469560d439b3a8e874f472a8 Mon Sep 17 00:00:00 2001 From: Ana Ordonez Date: Mon, 14 Jun 2021 12:00:56 -0700 Subject: [PATCH 1/9] refactor output metadata --- README.md | 20 +- coastal-storm-metrics.sh | 33 ++ cymep/cymep.py | 149 ++++--- cymep/functions/getSettings.py | 29 ++ cymep/functions/output_templates/README.md | 5 + .../output_templates/html_template.txt | 12 + cymep/functions/write_cmec.py | 244 ++++++++++++ cymep/functions/write_spatial.py | 372 ++++++++++++++++-- cymep/graphics-cymep.sh | 14 +- cymep/plotting/plot-spatial.ncl | 24 +- cymep/plotting/plot-table.ncl | 47 +-- cymep/plotting/plot-taylor.ncl | 239 +++++------ cymep/plotting/plot-temporal.ncl | 23 +- settings.json | 54 +++ 14 files changed, 1026 insertions(+), 239 deletions(-) create mode 100755 coastal-storm-metrics.sh create mode 100644 cymep/functions/getSettings.py create mode 100644 cymep/functions/output_templates/README.md create mode 100644 cymep/functions/output_templates/html_template.txt create mode 100644 cymep/functions/write_cmec.py create mode 100644 settings.json diff --git a/README.md b/README.md index 1a9b656..d762972 100644 --- a/README.md +++ b/README.md @@ -37,7 +37,7 @@ start 31 1980 1 6 6 482 303 120.500000 -14.250000 9.987638e+04 1.464815e+01 0.000000e+00 1980 1 6 6 476 301 119.000000 -14.750000 9.981100e+04 1.398848e+01 0.000000e+00 1980 1 6 12 476 300 119.000000 -15.000000 9.953694e+04 1.369575e+01 0.000000e+00 1980 1 6 18 - + ... ``` @@ -132,3 +132,21 @@ $> ./graphics-cymep.sh netcdf-files/netcdf_GLOB_rean_configs.nc ``` This will produce a suite of figures in various subfolders within `./fig/`. + + +### Run with CMEC driver + +An alternative workflow is available with [cmec-driver](https://github.com/cmecmetrics/cmec-driver). The workflow is: +1. Clone the coastal-storm-metrics repo. +2. Install dependencies. +3. From the `cmec-driver` directory, register CyMeP in the cmec library: +`python cmec-driver.py register ` +4. 
Add TempestExtremes ASCII trajectories to `cmec-driver/model`. + - For testing, copy `cymep/trajs/*` to `cmec-driver/model` +5. Create configuration csv in cmec-driver/model. + - For testing, copy `cymep/config-lists/rean_configs.csv` to `cmec-driver/model`. +6. Edit user settings in cmec-driver/config/cmec.json. + - For testing, use the default settings. +7. Run CyMeP module from cmec-driver: +`python cmec-driver.py run model/ output/ CyMeP` +8. Open `cmec-driver/output/CyMeP/index.html` to view results. diff --git a/coastal-storm-metrics.sh b/coastal-storm-metrics.sh new file mode 100755 index 0000000..e95e437 --- /dev/null +++ b/coastal-storm-metrics.sh @@ -0,0 +1,33 @@ +#!/bin/bash +# This is the driver script for running CyMeP via cmec-driver + +echo "Running Cyclone Metrics Package" +cymep_log=${CMEC_WK_DIR}/CyMeP.log.txt +echo "log:" $cymep_log +cd ${CMEC_CODE_DIR}/cymep +python cymep.py > $cymep_log + +if [[ $? = 0 ]]; then + echo "Success in Cyclone Metrics Package" + + echo "Generating graphics" + + # Make figures for each netcdf + for file in ${CMEC_WK_DIR}/netcdf-files/*; do + echo "\n"$file >> $cymep_log + ncl ./plotting/plot-spatial.ncl 'out_type="png"' 'ncfile="'$file'"' >> $cymep_log + ncl ./plotting/plot-temporal.ncl 'out_type="png"' 'ncfile="'$file'"' >> $cymep_log + ncl ./plotting/plot-taylor.ncl 'out_type="png"' 'ncfile="'$file'"' >> $cymep_log + + ncl ./plotting/plot-table.ncl plot_bias=False relative_performance=True invert_stoplight=False calc_deltas=False write_units=False 'out_type="png"' 'csvtype="spatial_corr"' 'ncfile="'$file'"' >> $cymep_log + ncl ./plotting/plot-table.ncl plot_bias=True relative_performance=False invert_stoplight=False calc_deltas=True write_units=True 'out_type="png"' 'csvtype="climo_mean"' 'ncfile="'$file'"' >> $cymep_log + ncl ./plotting/plot-table.ncl plot_bias=True relative_performance=False invert_stoplight=False calc_deltas=True write_units=True 'out_type="png"' 'csvtype="storm_mean"' 'ncfile="'$file'"' >> 
$cymep_log + ncl ./plotting/plot-table.ncl plot_bias=False relative_performance=True invert_stoplight=False calc_deltas=False write_units=False 'out_type="png"' 'csvtype="temporal_scorr"' 'ncfile="'$file'"' >> $cymep_log + done + + # Document figures in output.json and create html page + python functions/write_cmec.py >> $cymep_log + +else + echo "Failure in Cyclone Metrics Package" +fi diff --git a/cymep/cymep.py b/cymep/cymep.py index ce8bcf2..34de6d5 100644 --- a/cymep/cymep.py +++ b/cymep/cymep.py @@ -2,10 +2,13 @@ import re import numpy as np import pandas as pd +import json import scipy.stats as sps from netCDF4 import Dataset +import json sys.path.insert(0, './functions') +from getSettings import * from getTrajectories import * from mask_tc import * from track_density import * @@ -16,7 +19,7 @@ ##### User settings basin = 1 -csvfilename = 'sens_configs.csv' +csvfilename = 'rean_configs.csv' gridsize = 8.0 styr = 1980 enyr = 2020 @@ -29,6 +32,35 @@ do_fill_missing_pw = True do_defineMIbypres = False +# IO settings (Users: do not change) +wkdir = '.' +modeldir = 'trajs' +csvdir = 'config-lists' +cmec_driver = False + +## CMEC driver + +# If package is being run via cmec-driver, the following code section +# will override the above user settings with information from the CMEC +# environment variables and settings JSON. 
+ +# Get CMEC environment vars +if os.getenv("CMEC_WK_DIR") is not None: + wkdir = os.getenv("CMEC_WK_DIR") +if os.getenv("CMEC_MODEL_DATA") is not None: + modeldir = os.getenv("CMEC_MODEL_DATA") + csvdir = modeldir + +# If using config json, load those settings +if os.getenv("CMEC_CONFIG_DIR") is not None: + cmec_driver = True + user_settings_json = os.path.join(os.getenv("CMEC_CONFIG_DIR"),"cmec.json") + user_settings = load_settings_from_json(user_settings_json) + # Set user settings as global variables + globals().update(user_settings) + print("basin is: ") + print(basin) + #---------------------------------------------------------------------------------------- # Constants @@ -36,11 +68,9 @@ pi = 3.141592653589793 deg2rad = pi / 180. -#---------------------------------------------------------------------------------------- - # Read in configuration file and parse columns for each case # Ignore commented lines starting with ! -df=pd.read_csv("./config-lists/"+csvfilename, sep=',', comment='!', header=None) +df=pd.read_csv(csvdir+"/"+csvfilename, sep=',', comment='!', header=None) files = df.loc[ : , 0 ] strs = df.loc[ : , 1 ] isUnstructStr = df.loc[ : , 2 ] @@ -61,28 +91,28 @@ for x in pyvars: pydict[x] = np.empty((nfiles, nyears)) pydict[x][:] = np.nan - + # Init per month arrays pmdict = {} pmvars = ['pm_count','pm_tcd','pm_ace','pm_pace','pm_lmi'] for x in pmvars: pmdict[x] = np.empty((nfiles, nmonths)) pmdict[x][:] = np.nan - + # Init per year arrays aydict = {} ayvars = ['uclim_count','uclim_tcd','uclim_ace','uclim_pace','uclim_lmi'] for x in ayvars: aydict[x] = np.empty(nfiles) aydict[x][:] = np.nan - + # Init per storm arrays asdict = {} asvars = ['utc_tcd','utc_ace','utc_pace','utc_latgen','utc_lmi'] for x in asvars: asdict[x] = np.empty(nfiles) asdict[x][:] = np.nan - + # Get basin string strbasin=getbasinmaskstr(basin) @@ -95,13 +125,13 @@ print("First character is /, using absolute path") trajfile=files[ii] else: - trajfile='trajs/'+files[ii] + 
trajfile=modeldir+"/"+files[ii] isUnstruc=isUnstructStr[ii] nVars=-1 headerStr='start' - + wind_factor = windcorrs[ii] - + # Determine the number of model years available in our dataset if truncate_years: #print("Truncating years from "+yearspermember(zz)+" to "+nyears) @@ -119,7 +149,7 @@ xwind = traj_data[5,:,:]*wind_factor xyear = traj_data[7,:,:] xmonth = traj_data[8,:,:] - + # Initialize nan'ed arrays specific to this traj file xglon = np.empty(nstorms) xglat = np.empty(nstorms) @@ -133,7 +163,7 @@ xgyear[:] = np.nan xlatmi[:] = np.nan xlonmi[:] = np.nan - + # Fill in missing values of pressure and wind if do_fill_missing_pw: aaa=2.3 @@ -156,7 +186,7 @@ xpres = np.where((xpres < 0.0),1008.,xpres) xwind = np.where((xwind < 0.0),15.,xwind) print("Num fills for PW " + str(numfixes_1) + " " + str(numfixes_2) + " " + str(numfixes_3)) - + # Filter observational records # if "control" record and do_special_filter_obs = true, we can apply specific # criteria here to match objective tracks better @@ -231,7 +261,7 @@ maskoff = True if truncate_years: if oriyear < styr or oriyear > enyr: - maskoff = True + maskoff = True if maskoff: xlon[kk,:] = float('NaN') xlat[kk,:] = float('NaN') @@ -243,19 +273,19 @@ xglat[kk] = float('NaN') xgmonth[kk] = float('NaN') xgyear[kk] = float('NaN') - + ######################################### - + # Calculate LMI for kk, zz in enumerate(range(nstorms)): if not np.isnan(xglat[kk]): if do_defineMIbypres: locMI=np.nanargmin(xpres[kk,:]) - else: + else: locMI=np.nanargmax(xwind[kk,:]) xlatmi[kk]=xlat[kk,locMI] - xlonmi[kk]=xlon[kk,locMI] - + xlonmi[kk]=xlon[kk,locMI] + # Flip LMI sign in SH to report poleward values when averaging abs_lats=True if abs_lats: @@ -277,12 +307,12 @@ # Calculate storm-accumulated PACE calcPolyFitPACE=True xprestmp = xpres - + # Threshold PACE if requested if THRESHOLD_PACE_PRES > 0: print("Thresholding PACE to only TCs < "+str(THRESHOLD_PACE_PRES)+" hPa") xprestmp = np.where(xprestmp > 
THRESHOLD_PACE_PRES,float('NaN'),xprestmp) - + xprestmp = np.ma.array(xprestmp, mask=np.isnan(xprestmp)) np.warnings.filterwarnings('ignore') if calcPolyFitPACE: @@ -300,20 +330,20 @@ # Here, we apply a predetermined PW relationship from Holland xprestmp = np.ma.where(xprestmp < 1010.0, xprestmp, 1010.0) xpacepp = 1.0e-4 * (ms_to_kts*2.3*(1010.-xprestmp)**0.76)**2. - + # Calculate PACE from xpacepp xpace = np.nansum( xpacepp , axis = 1) - + # Get maximum intensity and TCD xmpres = np.nanmin( xpres , axis=1 ) xmwind = np.nanmax( xwind , axis=1 ) xtcd = np.nansum( xtcdpp, axis=1 ) - + # Need to get rid of storms with no TC, ACE or PACE xtcd = np.where(xtcd == 0,float('NaN'),xtcd) xace = np.where(xace == 0,float('NaN'),xace) xpace = np.where(xpace == 0,float('NaN'),xpace) - + # Bin storms per dataset per calendar month for jj in range(1, 12+1): pmdict['pm_count'][ii,jj-1] = np.count_nonzero(xgmonth == jj) / nmodyears @@ -332,7 +362,7 @@ pydict['py_pace'][ii,yrix] = np.nansum( np.where(xgyear == jj,xpace,0.0) ) / ensmembers[ii] pydict['py_lmi'][ii,yrix] = np.nanmean( np.where(xgyear == jj,xlatmi,float('NaN')) ) pydict['py_latgen'][ii,yrix] = np.nanmean( np.where(xgyear == jj,np.absolute(xglat),float('NaN')) ) - + # Calculate control interannual standard deviations if ii == 0: stdydict={} @@ -342,21 +372,21 @@ stdydict['sdy_pace'] = np.nanstd(pydict['py_pace'][ii,:]) stdydict['sdy_lmi'] = np.nanstd(pydict['py_lmi'][ii,:]) stdydict['sdy_latgen'] = np.nanstd(pydict['py_latgen'][ii,:]) - - # Calculate annual averages - aydict['uclim_count'][ii] = np.nansum(pmdict['pm_count'][ii,:]) + + # Calculate annual averages + aydict['uclim_count'][ii] = np.nansum(pmdict['pm_count'][ii,:]) aydict['uclim_tcd'][ii] = np.nansum(xtcd) / nmodyears aydict['uclim_ace'][ii] = np.nansum(xace) / nmodyears aydict['uclim_pace'][ii] = np.nansum(xpace) / nmodyears aydict['uclim_lmi'][ii] = np.nanmean(pydict['py_lmi'][ii,:]) - - # Calculate storm averages + + # Calculate storm averages 
asdict['utc_tcd'][ii] = np.nanmean(xtcd) asdict['utc_ace'][ii] = np.nanmean(xace) asdict['utc_pace'][ii] = np.nanmean(xpace) asdict['utc_lmi'][ii] = np.nanmean(xlatmi) asdict['utc_latgen'][ii] = np.nanmean(np.absolute(xglat)) - + # Calculate spatial densities, integrals, and min/maxes trackdens, denslat, denslon = track_density(gridsize,0.0,xlat.flatten(),xlon.flatten(),False) trackdens = trackdens/nmodyears @@ -364,10 +394,10 @@ gendens = gendens/nmodyears tcddens, denslat, denslon = track_mean(gridsize,0.0,xlat.flatten(),xlon.flatten(),xtcdpp.flatten(),False,0) tcddens = tcddens/nmodyears - acedens, denslat, denslon = track_mean(gridsize,0.0,xlat.flatten(),xlon.flatten(),xacepp.flatten(),False,0) - acedens = acedens/nmodyears + acedens, denslat, denslon = track_mean(gridsize,0.0,xlat.flatten(),xlon.flatten(),xacepp.flatten(),False,0) + acedens = acedens/nmodyears pacedens, denslat, denslon = track_mean(gridsize,0.0,xlat.flatten(),xlon.flatten(),xpacepp.flatten(),False,0) - pacedens = pacedens/nmodyears + pacedens = pacedens/nmodyears minpres, denslat, denslon = track_minmax(gridsize,0.0,xlat.flatten(),xlon.flatten(),xpres.flatten(),"min",-1) maxwind, denslat, denslon = track_minmax(gridsize,0.0,xlat.flatten(),xlon.flatten(),xwind.flatten(),"max",-1) @@ -390,7 +420,7 @@ msvars = ['fulldens','fullpres','fullwind','fullgen','fullace','fullpace','fulltcd','fulltrackbias','fullgenbias','fullacebias','fullpacebias'] for x in msvars: msdict[x] = np.empty((nfiles, denslat.size, denslon.size)) - + # Store this model's data in the master spatial array msdict['fulldens'][ii,:,:] = trackdens[:,:] msdict['fullgen'][ii,:,:] = gendens[:,:] @@ -403,10 +433,9 @@ msdict['fullgenbias'][ii,:,:] = gendens[:,:] - msdict['fullgen'][0,:,:] msdict['fullacebias'][ii,:,:] = acedens[:,:] - msdict['fullace'][0,:,:] msdict['fullpacebias'][ii,:,:] = pacedens[:,:] - msdict['fullpace'][0,:,:] - + print("-------------------------------------------------------------------------") - - + ### Back to 
the main program #for zz in pydict: @@ -414,7 +443,7 @@ # pydict[zz] = np.where( pydict[zz] <= 0. , 0. , pydict[zz] ) # pydict[zz] = np.where( np.isnan(pydict[zz]) , 0. , pydict[zz] ) # pydict[zz] = np.where( np.isinf(pydict[zz]) , 0. , pydict[zz] ) - + # Spatial correlation calculations ## Initialize dict @@ -476,13 +505,37 @@ for ii in range(nfiles): taydict["tay_bias2"][ii] = 100. * ( (aydict['uclim_count'][ii] - aydict['uclim_count'][0]) / aydict['uclim_count'][0] ) +print("\nWriting metrics to file...") +# Initialize output json +if cmec_driver: + outjson = create_output_json(wkdir,modeldir) + # Write out primary stats files -write_single_csv(rxydict,strs,'./csv-files/','metrics_'+os.path.splitext(csvfilename)[0]+'_'+strbasin+'_spatial_corr.csv') -write_single_csv(rsdict,strs,'./csv-files/','metrics_'+os.path.splitext(csvfilename)[0]+'_'+strbasin+'_temporal_scorr.csv') -write_single_csv(rpdict,strs,'./csv-files/','metrics_'+os.path.splitext(csvfilename)[0]+'_'+strbasin+'_temporal_pcorr.csv') -write_single_csv(aydict,strs,'./csv-files/','metrics_'+os.path.splitext(csvfilename)[0]+'_'+strbasin+'_climo_mean.csv') -write_single_csv(asdict,strs,'./csv-files/','metrics_'+os.path.splitext(csvfilename)[0]+'_'+strbasin+'_storm_mean.csv') -write_single_csv(stdydict,strs[0],'./csv-files/','means_'+os.path.splitext(csvfilename)[0]+'_'+strbasin+'_climo_mean.csv') +csv_base = os.path.splitext(csvfilename)[0]+'_'+strbasin + +fname = 'metrics_'+csv_base+'_spatial_corr' +desc = ['spatial_corr','spatial correlation','Statistics for spatial correlation table'] +write_single_csv(rxydict,strs,wkdir,fname,desc,cmec_driver) + +fname = 'metrics_'+csv_base+'_temporal_scorr' +desc = ['temporal_scorr', 'temporal spearman rank correlations','Statistics for temporal spearman rank correlation table'] +write_single_csv(rsdict,strs,wkdir,fname,desc,cmec_driver) + +fname = 'metrics_'+csv_base+'_temporal_pcorr' +desc = ['temporal_pcorr','temporal pearson correlations','Statistics for temporal 
pearson correlation table'] +write_single_csv(rpdict,strs,wkdir,fname,desc,cmec_driver) + +fname = 'metrics_'+csv_base+'_climo_mean' +desc = ['climo_mean bias','climatological bias','Statistics for climatological bias table'] +write_single_csv(aydict,strs,wkdir,fname,desc,cmec_driver) + +fname = 'metrics_'+csv_base+'_storm_mean' +desc = ['storm_mean','storm mean bias','Statistics for storm mean bias table'] +write_single_csv(asdict,strs,wkdir,fname,desc,cmec_driver) + +fname = 'means_'+csv_base+'_climo_mean' +desc = ['climo_mean','climatological mean','Climatological mean statistics of reference dataset'] +write_single_csv(stdydict,strs[0],wkdir,fname,desc,cmec_driver) # Package a series of global package inputs for storage as NetCDF attributes globaldict={} @@ -491,4 +544,4 @@ globaldict[x] = globals()[x] # Write NetCDF file -write_spatial_netcdf(msdict,pmdict,pydict,taydict,strs,nyears,nmonths,denslat,denslon,globaldict) +write_spatial_netcdf(msdict,pmdict,pydict,taydict,strs,nyears,nmonths,denslat,denslon,globaldict,wkdir,cmec_driver) diff --git a/cymep/functions/getSettings.py b/cymep/functions/getSettings.py new file mode 100644 index 0000000..e0e18d6 --- /dev/null +++ b/cymep/functions/getSettings.py @@ -0,0 +1,29 @@ +import json + +def load_settings_from_json(filename): + # Load user settings from a json file + with open(filename) as config_file: + user_settings = json.load(config_file).get("CyMeP") + + # Check settings types + setting_type = { + "basin": int, + "csvfilename": str, + "gridsize": (int,float), + "styr": int, + "enyr": int, + "stmon": int, + "enmon": int, + "truncate_years": bool, + "THRESHOLD_ACE_WIND": (int,float), + "THRESHOLD_PACE_PRES": (int,float), + "do_special_filter_obs": bool, + "do_fill_missing_pw": bool, + "do_defineMIbypres": bool} + + for setting in user_settings: + stype = setting_type[setting] + if not isinstance(user_settings[setting], stype): + raise TypeError("Setting " + setting + " must be of type " + str(stype)) + + return 
user_settings diff --git a/cymep/functions/output_templates/README.md b/cymep/functions/output_templates/README.md new file mode 100644 index 0000000..2b5ee6c --- /dev/null +++ b/cymep/functions/output_templates/README.md @@ -0,0 +1,5 @@ +### Output Templates + +These files are used to generate the CMEC html landing page and output description JSON. + +If new outputs are added to CyMeP, a description should be added to the template output_desc.json. Figures are organized by folder structure under the key "fig". CSV files are found under "csv-files" and divided by "mean" and "metric" versions. The NetCDF file is under "netcdf-files". An additional "statistics" section holds information for generating descriptions for the metrics in the CSV files. If the new outputs are located in different folders or have a different structure than existing outputs, additional code will likely be needed in ../write_cmec.py as well. diff --git a/cymep/functions/output_templates/html_template.txt b/cymep/functions/output_templates/html_template.txt new file mode 100644 index 0000000..30d41b8 --- /dev/null +++ b/cymep/functions/output_templates/html_template.txt @@ -0,0 +1,12 @@ + + +CyMeP Results +

CyMeP metrics and figures

+

Metrics files

+
$json_metric +
+

Plots

+

$title

+

$plot

+ + \ No newline at end of file diff --git a/cymep/functions/write_cmec.py b/cymep/functions/write_cmec.py new file mode 100644 index 0000000..a831b0e --- /dev/null +++ b/cymep/functions/write_cmec.py @@ -0,0 +1,244 @@ +""" +write_cmec.py + +This script generates figure information for the output.json file that +accompanies the module output. It also creates an html landing page that +links to the output figures and metrics JSONs. + +This is run after the figures are generated in the CyMeP CMEC driver script. + +The CMEC environment variables must be set to run this script successfully. +""" +import csv +import json +import os +from string import Template +import sys +import numpy as np +import xarray as xr + +def define_figures(): + """Store descriptions and longnames of the CyMeP output figures, + organized by subfolder and unique keywords. Each of the top-level + keys will become a subheading on the html page.""" + figure_description = { + "tables":{ + "temporal_scorr": { + "longname": "temporal rank correlation", + "description": "Table of rank correlations for seasonal values of storm count, TCD, ACE, PACE, and LMI."}, + "storm_mean": { + "longname": "storm mean bias", + "description": "Table comparing storm mean bias in TCD, ACE, PACE, latgen, and LMI"}, + "climo_mean": { + "longname": "climatological bias", + "description": "Table comparing mean bias in annually averaged storm count, TCD, ACE, PACE, and LMI"}, + "spatial_corr": { + "longname": "spatial pearson correlation", + "description": "Table of spatial correlations for track location, TC genesis, 10-m wind speed, sea level pressure, ACE, and PACE with the reference dataset."} + }, + "taylor": { + "taylor": { + "longname": "Taylor diagram", + "description": "A Taylor diagram showing aggregated TC statistics for the provided trajectories."} + }, + "line": { + "interann_paceByYear": { + "longname": "pressure ACE interannual cycle", + "description": "Time series of total pressure ACE by year"}, + 
"interann_aceByYear": { + "longname": "accumulated cyclone energy (ACE) interannual cycle", + "description": "Time series of accumulated cyclone energy by year"}, + "interann_tcdByYear": { + "longname": "tropical cyclone days interannual cycle", + "description": "Time series of tropical cyclone days by year"}, + "interann_stormsByYear": { + "longname": "storm count interannual cycle", + "description": "Time series of storm count by year"}, + "seasonal_paceByMonth": { + "longname": "Seasonal cycle of pressure ACE", + "description": "Line plot of mean pressure ACE by month"}, + "seasonal_aceByMonth": { + "longname": "Seasonal cycle of ACE", + "description": "Line plot of mean accumulated cyclone energy by month"}, + "seasonal_tcdByMonth": { + "longname": "Seasonal cycle of tropical cyclone days", + "description": "Line plot of mean tropical cyclone days by month"}, + "seasonal_stormsByMonth": { + "longname": "Seasonal cycle of storm count", + "description": "Line plot of mean storm count by month"} + }, + "spatial": { + "pacebias": { + "longname": "bias in pressure ACE", + "description": "Spatial plots of pressure ACE bias relative to reference dataset"}, + "acebias": { + "longname": "bias in accumulated cyclone energy", + "description": "Spatial plots of ACE bias relative to reference dataset"}, + "genbias": { + "longname": "bias in genesis location", + "description": "Spatial plots of genesis location bias relative to reference dataset"}, + "trackbias": { + "longname": "track bias", + "description": "Spatial plots of track bias relative to reference dataset"}, + "tcddens": { + "longname": "tropical cyclone day density", + "description": "Spatial plots of tropical cyclone day occurence per year"}, + "acedens": { + "longname": "accumulated cyclone energy density", + "description": "Spatial plots of ACE integrated by grid cell"}, + "pacedens": { + "longname": "pressure ACE density", + "description": "Spatial plots of pressure ACE integrated by grid cell"}, + 
"gendens": { + "longname": "genesis location density", + "description": "Spatial plots of TC genesis occurence per year"}, + "maxwind": { + "longname": "maximum wind speed", + "description": "Spatial plots of maximum 10-m wind speed over the evaluation period"}, + "minpres": { + "longname": "minimum pressure", + "description": "Spatial plots of minimum pressure over the evaluation period"}, + "trackdens": { + "longname": "track density", + "description": "Spatial plots of track occurence per year"} + } + } + return figure_description + +def populate_filename(data_dict, filename): + """Combine filename and description info for output.json.""" + output_dict = {} + for key in data_dict: + if key in filename: + output_dict[key] = data_dict[key].copy() + output_dict[key]["filename"] = filename + if not output_dict: + print("Warning: Description for figure {0}".format(filename) + + " not found in write_cmec.define_figures()") + output_dict[filename] = {"filename": filename} + return output_dict + +def populate_html_json(html_lines, json_list): + """Populate the loaded html template with json file names.""" + for ind, line in enumerate(html_lines): + if "$json_metric" in line: + json_ind = ind + # Create template line + linetemplate = Template(html_lines[json_ind]) + # Replace template line with actual html for each + # file, making sure to overwrite the original template + count = 0 + for json_name in json_list: + json_dict = {"json_metric": json_name} + if count == 0: + html_lines[json_ind] = linetemplate.substitute(json_dict) + else: + html_lines.insert(json_ind+count, linetemplate.substitute(json_dict)) + count += 1 + return html_lines + +def populate_html_figures(html_lines, fig_dict): + """Populate the loaded html template with figure file names.""" + # Figure out where figure templating starts ($title keyword) + for ind, line in enumerate(html_lines): + if "$title" in line: + title_ind = ind + # Create template lines + titletemplate = 
Template(html_lines[title_ind]) + linetemplate = Template(html_lines[title_ind+1]) + # Replace template lines with actual html for each figure + count = 0 + for plot_category in fig_dict: + # Sub the folder name into the title line + titledict = {"title": plot_category.capitalize()} + # Since we're pulling lines from the template and then overwriting + # them, we need to keep track of insertions + if count == 0: + html_lines[title_ind] = titletemplate.substitute(titledict) + else: + html_lines.insert(title_ind+count, titletemplate.substitute(titledict)) + count += 1 + # Sub the figure names into the figure lines + for file in fig_dict[plot_category]: + html_dict = {"plot": file, "folder": plot_category} + newfig = linetemplate.substitute(html_dict) + if count == 1: + html_lines[title_ind+count] = newfig + else: + html_lines.insert(title_ind+count,newfig) + count += 1 + return html_lines + + +if __name__ == "__main__": + # Create an html page for exploring CyMeP figures and + # add figure and html information to output.json file. + # This script expect the CMEC environment variables to be set. 
+ print("\nDocumenting figures and generating index.html") + + # Get some file names + wkdir = os.getenv("CMEC_WK_DIR") + codedir = os.getenv("CMEC_CODE_DIR") + figdir = "fig" + output_json = "output.json" + html_file = "index.html" + + # Descriptions for all the different file names + figure_desc = define_figures() + + # Store filenames for writing html later + html_list = {"fig": {}} + + # Get figure filenames organized by subfolder + fig_dict = {} + fig_path = os.path.join(wkdir,figdir) + fig_path_contents = os.listdir(fig_path) + sub_dir_list = [sub_dir for sub_dir in fig_path_contents + if os.path.isdir(os.path.join(fig_path, sub_dir))] + for sub_dir in sub_dir_list: + fig_file_list = os.listdir(os.path.join(fig_path,sub_dir)) + html_list["fig"][sub_dir] = sorted(fig_file_list) # grab file names for html page later + for fig_file in fig_file_list: + fig_file = os.path.join(figdir,sub_dir,fig_file) + tmp = populate_filename(figure_desc[sub_dir], fig_file) + fig_dict.update(tmp) + + # The spatial figure list needs extra sorting + dens = [] + bias = [] + other = [] + for item in html_list["fig"]["spatial"]: + if "dens" in item: + dens.append(item) + elif "bias" in item: + bias.append(item) + else: + other.append(item) + new_spatial_list = dens + bias + other + html_list["fig"]["spatial"] = new_spatial_list + + # Generate html page from template + html_dir = os.path.join(codedir,"cymep/functions/output_templates/html_template.txt") + with open(html_dir, "r") as html_template: + html_lines = html_template.readlines() + + # Add links to all the output images + html_lines = populate_html_figures(html_lines, html_list["fig"]) + # Add links to the output metrics jsons + jsonlist = sorted(os.listdir(wkdir+"/json")) + html_lines = populate_html_json(html_lines, jsonlist) + + # Write html + html_path = os.path.join(wkdir,html_file) + with open(html_path, "w") as index: + index.writelines(html_lines) + + # Add html and figures to output.json + outfile = 
os.path.join(wkdir,output_json) + with open(outfile, "r") as outfilename: + outjson = json.load(outfilename) + outjson["plots"] = fig_dict + outjson["index"] = "index.html" + outjson["html"] = "index.html" + with open(outfile, "w") as outfilename: + json.dump(outjson, outfilename, indent=2) \ No newline at end of file diff --git a/cymep/functions/write_spatial.py b/cymep/functions/write_spatial.py index 096c1a0..e740507 100644 --- a/cymep/functions/write_spatial.py +++ b/cymep/functions/write_spatial.py @@ -1,23 +1,153 @@ +import json import numpy as np import netCDF4 as nc from datetime import datetime import os -def write_spatial_netcdf(spatialdict,permondict,peryrdict,taydict,modelsin,nyears,nmonths,latin,lonin,globaldict): - +# global name for cmec descriptive json +output_json = "output.json" + +def create_dims(dims_dict, **kwargs): + """Build dimensions dictionary for json from component dictionaries. + Parameters: + dims_dict (dictionary): dictionary to store dimensions in + **kwargs (name/value pairs): sub-dictionaries for each dimension + """ + for key, value in kwargs.items(): + dims_dict.update({key: value}) + return dims_dict + +def set_metric_definitions(metric_list, desc): + """Populates metrics definitions using template dictionary.""" + metric_dict = {} + for metric in metric_list: + pre,suf = metric.split('_') + if (pre in desc["prefix"]) and (suf in desc["suffix"]): + metric_desc = desc["prefix"][pre] + desc["suffix"][suf] + metric_dict[metric] = metric_desc + else: + print("Warning: missing metric description in " + + "write_spatial.define_statistics()" + + " for metric {0} {1}".format(pre,suf)) + metric_dict[metric] = "" + return metric_dict + +def get_dimensions(json_dict, json_structure): + """Populate dimensions details from results dictionary. 
+ Parameters: + json_dict (dictionary): metrics to pull dimension details from + json_structure (list): ordered list of dimension names + """ + keylist = {} + level = 0 + while level < len(json_structure): + if isinstance(json_dict, dict): + first_key = list(json_dict.items())[0][0] + if first_key == "attributes": + first_key = list(json_dict.items())[1][0] + dim = json_structure[level] + keys = {key: {} for key in json_dict if key != "attributes"} + keylist[dim] = keys + json_dict = json_dict[first_key] + level += 1 + return keylist + +def get_env(): + """Return versions of dependencies.""" + import pandas + import scipy + import netCDF4 + import subprocess + + versions = {} + versions['netCDF4'] = netCDF4.__version__ + # numpy is already imported + versions['numpy'] = np.__version__ + versions['pandas'] = pandas.__version__ + versions['scipy'] = scipy.__version__ + ncl = subprocess.check_output(['ncl', '-V']).decode("utf-8").rstrip() + versions['ncl'] = ncl + return versions + +def define_statistics(): + """Define all the statistics we're producing with the 'prefix_suffix' + naming convention. 
Descriptions needed for the JSON metrics.""" + statistics = { "prefix": { + "sdy": "standard deviation of ", + "uclim": "climatological ", + "rxy": "spatial correlation of ", + "utc": "storm ", + "rp": "temporal pearson correlation of ", + "rs": "temporal spearman rank correlation of ", + "pm": "monthly ", + "tay": "taylor diagram ", + "py": "yearly " + }, + "suffix": { + "count": "storm count", + "tcd": "tropical cyclone days", + "ace": "accumulated cyclone energy", + "pace": "pressure ACE", + "lmi": "latitude of lifetime-maximum intensity", + "latgen": "latitude of cyclone genesis", + "track": "track density", + "gen": "cyclone genesis", + "u10": "maximum 10m wind speed", + "slp": "minmum sea level pressure", + "pc": "pattern correlation", + "ratio": "ratio", + "bias": "bias", + "xmean": "test variable weighted areal average", + "ymean": "reference variable weighted areal average", + "xvar": "test variable weighted areal variance", + "yvar": "test variable weighted areal variance", + "rmse": "root mean square error", + "bias2": "relative bias" + } + } + return statistics + +def create_output_json(wkdir,modeldir): + """Initialize the output.json file that describes module outputs + for cmec-driver""" + log_path = wkdir + "/CyMeP.log.txt" + out_json = {'index': 'index', + 'provenance': {}, + 'plots': {}, + 'html': "index.html", + 'metrics': {}} + out_json["provenance"] = {"environment": get_env(), + "modeldata": modeldir, + "obsdata": None, + "log": log_path} + out_json["data"] = {} + out_json["metrics"] = {} + out_json["plots"] = {} + out_json["html"] = {} + + outfilepath = os.path.join(wkdir,output_json) + if os.path.exists(outfilepath): + os.remove(outfilepath) + + with open(outfilepath,"w") as outfilename: + json.dump(out_json, outfilename, indent=2) + +def write_spatial_netcdf(spatialdict,permondict,peryrdict,taydict,modelsin,nyears,nmonths,latin,lonin,globaldict,wkdir,cmec=False): + # Convert modelsin from pandas to list modelsin=modelsin.tolist() - + # Set 
up dimensions nmodels=len(modelsin) nlats=latin.size nlons=lonin.size nchar=16 - - netcdfdir="./netcdf-files/" + + netcdfdir=wkdir + "/netcdf-files/" os.makedirs(os.path.dirname(netcdfdir), exist_ok=True) + os.chmod(os.path.dirname(netcdfdir),0o777) netcdfile=netcdfdir+"/netcdf_"+globaldict['strbasin']+"_"+os.path.splitext(globaldict['csvfilename'])[0] - + # open a netCDF file to write ncout = nc.Dataset(netcdfile+".nc", 'w', format='NETCDF4') @@ -59,12 +189,12 @@ def write_spatial_netcdf(spatialdict,permondict,peryrdict,taydict,modelsin,nyear for ii in permondict: vout = ncout.createVariable(ii, 'f', ('model', 'months'), fill_value=1e+20) vout[:] = np.ma.masked_invalid(permondict[ii][:,:]) - + # create variable array for ii in peryrdict: vout = ncout.createVariable(ii, 'f', ('model', 'years'), fill_value=1e+20) vout[:] = np.ma.masked_invalid(peryrdict[ii][:,:]) - + # create variable array for ii in taydict: vout = ncout.createVariable(ii, 'f', ('model'), fill_value=1e+20) @@ -73,21 +203,199 @@ def write_spatial_netcdf(spatialdict,permondict,peryrdict,taydict,modelsin,nyear # Write model names to char model_names = ncout.createVariable('model_names', 'c', ('model', 'characters')) model_names[:] = nc.stringtochar(np.array(modelsin).astype('S16')) - + #today = datetime.today() ncout.description = "Coastal metrics processed data" ncout.history = "Created " + datetime.today().strftime('%Y-%m-%d-%H:%M:%S') for ii in globaldict: ncout.setncattr(ii, str(globaldict[ii])) - + # close files ncout.close() - + if cmec: + # Update metadata in output.json + desc = { + "region": globaldict["strbasin"], + "filename": os.path.relpath(netcdfile,start=wkdir), + "longname": globaldict["strbasin"] + " netcdf output", + "description": "Coastal metrics processed data" + } + with open(os.path.join(wkdir,output_json), "r") as outfilename: + tmp = json.load(outfilename) + tmp["data"].update({"netcdf": desc}) + with open(os.path.join(wkdir,output_json), "w") as outfilename: + json.dump(tmp, 
def write_nc_metrics_jsons(permondict, peryrdict, taydict, modelsin, nyears, globaldict, wkdir):
    """Output metrics from netCDF into CMEC-formatted JSONs.

    Writes three JSON files (monthly, yearly, Taylor) under <wkdir>/json
    and registers each of them in the cmec-driver output metadata file.

    Parameters:
        permondict (dict): per-month metrics; each value is a
            (model, month) array
        peryrdict (dict): per-year metrics; each value is a
            (model, year) array
        taydict (dict): Taylor statistics; each value is a (model,) array
        modelsin (list): model names, same order as the metric arrays
        nyears (int): number of years analyzed (kept for interface
            compatibility; the year range comes from globaldict)
        globaldict (dict): global settings; must provide 'strbasin',
            'csvfilename', 'styr' and 'enyr'
        wkdir (str): cmec-driver working directory
    """
    with open(os.path.join(wkdir, output_json), "r") as outfilename:
        outjson = json.load(outfilename)

    os.makedirs(wkdir + "/json", exist_ok=True)
    base_file_name = (wkdir + "/json/netcdf_" + globaldict['strbasin'] + "_"
                      + os.path.splitext(globaldict['csvfilename'])[0])

    # Load descriptions for all the different statistics
    data_desc = define_statistics()
    permon_json_name = base_file_name + "_month.json"
    peryr_json_name = base_file_name + "_year.json"
    tay_json_name = base_file_name + "_taylor.json"
    # Build a distinct empty dict per model; dict.fromkeys(modelsin, {})
    # would make every key share one dict object.
    model_dict = {model: {} for model in modelsin}

    #-----Monthly-----
    # Set up metric dimensions
    json_month = {"DIMENSIONS": {
        "json_structure": ["model", "metric", "month"], "dimensions": {}}}
    metric_list = [*permondict]
    met_dict = set_metric_definitions(metric_list, data_desc)
    # Month dimension is keyed by 0-based month index (as strings, to
    # match the integer indices used in RESULTS once JSON-serialized).
    month_dict = {"0": "January", "1": "February", "2": "March", "3": "April",
                  "4": "May", "5": "June", "6": "July", "7": "August",
                  "8": "September", "9": "October", "10": "November",
                  "11": "December"}
    # Populate RESULTS
    results_json = {"RESULTS": {}}
    for model_num, model_name in enumerate(modelsin):
        results_json["RESULTS"][model_name] = {}
        for metric in metric_list:
            results_json["RESULTS"][model_name].update({metric: {}})
            for time, value in enumerate(permondict[metric][model_num]):
                if np.isnan(value):
                    value = None  # NaN is not valid JSON
                results_json["RESULTS"][model_name][metric].update({time: value})
    # Create other json fields
    json_month.update(results_json)
    json_month["DIMENSIONS"]["dimensions"].update({"model": model_dict})
    json_month["DIMENSIONS"]["dimensions"].update({"metric": met_dict})
    json_month["DIMENSIONS"]["dimensions"].update({"month": month_dict})
    # Write json
    with open(permon_json_name, "w") as mfile:
        json.dump(json_month, mfile, indent=2)
    # Update entry in metadata json
    outjson["metrics"][os.path.basename(permon_json_name)] = {
        "longname": "Monthly cyclone metrics",
        "filename": os.path.relpath(permon_json_name, start=wkdir),
        "description": "monthly cyclone metrics converted from netcdf"
    }

    #-----Yearly-----
    json_year = {"DIMENSIONS": {
        "json_structure": ["model", "metric", "year"], "dimensions": {}}}
    metric_list = [*peryrdict]
    met_dict = set_metric_definitions(metric_list, data_desc)
    year_list = [y for y in range(int(globaldict["styr"]), int(globaldict["enyr"])+1)]
    # Populate RESULTS
    results_json = {"RESULTS": {}}
    for model_num, model_name in enumerate(modelsin):
        results_json["RESULTS"][model_name] = {}
        for metric in metric_list:
            results_json["RESULTS"][model_name].update({metric: {}})
            for time, value in enumerate(peryrdict[metric][model_num]):
                if np.isnan(value):
                    value = None
                # Key by calendar year rather than array index
                time = str(time + int(globaldict["styr"]))
                results_json["RESULTS"][model_name][metric].update({time: value})
    # Create other json fields (the original called update(results_json)
    # twice; once is enough)
    json_year.update(results_json)
    year_dict = {year: {} for year in year_list}
    json_year["DIMENSIONS"]["dimensions"].update({"model": model_dict})
    json_year["DIMENSIONS"]["dimensions"].update({"metric": met_dict})
    json_year["DIMENSIONS"]["dimensions"].update({"year": year_dict})
    # Write json
    with open(peryr_json_name, "w") as yfile:
        json.dump(json_year, yfile, indent=2)

    # Update entry in metadata json
    outjson["metrics"][os.path.basename(peryr_json_name)] = {
        "longname": "Yearly cyclone metrics",
        "filename": os.path.relpath(peryr_json_name, start=wkdir),
        "description": "Yearly cyclone metrics converted from netcdf"
    }

    #-----Taylor-----
    json_taylor = {"DIMENSIONS": {
        "json_structure": ["model", "metric"], "dimensions": {}}}
    metric_list = [*taydict]
    met_dict = set_metric_definitions(metric_list, data_desc)
    # Populate RESULTS (Taylor stats are scalars per model/metric)
    results_json = {"RESULTS": {}}
    for model_num, model_name in enumerate(modelsin):
        results_json["RESULTS"][model_name] = {}
        for metric in metric_list:
            value = taydict[metric][model_num]
            if np.isnan(value):
                value = None
            results_json["RESULTS"][model_name].update({metric: value})
    # Update other fields
    json_taylor.update(results_json)
    json_taylor["DIMENSIONS"]["dimensions"].update({"model": model_dict})
    json_taylor["DIMENSIONS"]["dimensions"].update({"metric": met_dict})
    # write json
    with open(tay_json_name, "w") as tfile:
        json.dump(json_taylor, tfile, indent=2)

    # Update entry in metadata json
    outjson["metrics"][os.path.basename(tay_json_name)] = {
        "longname": "Taylor cyclone metrics",
        "filename": os.path.relpath(tay_json_name, start=wkdir),
        "description": "Taylor cyclone metrics converted from netcdf"
    }

    # Return metadata info
    with open(os.path.join(wkdir, output_json), "w") as outfilename:
        json.dump(outjson, outfilename, indent=2)

def write_single_json(vardict, modelsin, wkdir, jsonname, desc):
    """Convert a metrics dictionary to the CMEC JSON format and register
    the resulting file in the cmec-driver output metadata.

    Parameters:
        vardict (dict): metric name -> scalar (single model) or
            per-model array (multiple models)
        modelsin (str or list): model name(s)
        wkdir (str): cmec-driver working directory
        jsonname (str): output file name, without the ".json" extension
        desc (list): 3 strings: unique identifier, longname, description
    """
    json_structure = ["model", "metric"]
    cmec_json = {"DIMENSIONS": {"json_structure": json_structure}, "RESULTS": {}}
    statistics = define_statistics()
    if isinstance(modelsin, str):
        modelsin = [modelsin]
    # BUG FIX: the original used dict.fromkeys(modelsin, {}), which makes
    # every model share ONE dict, so all models ended up reporting the
    # values written for the last model. Build a distinct dict per model.
    results = {model: {} for model in modelsin}
    if len(modelsin) > 1:
        for ii in vardict:
            for model_num, model in enumerate(modelsin):
                value = vardict[ii][model_num]
                results[model][ii] = None if np.isnan(value) else value
    else:
        for ii in vardict:
            value = vardict[ii]
            results[modelsin[0]][ii] = None if np.isnan(value) else value
    dimensions = get_dimensions(results.copy(), json_structure)
    metric_list = [key for key in dimensions["metric"]]
    dimensions["metric"] = set_metric_definitions(metric_list, statistics)
    cmec_json["DIMENSIONS"]["dimensions"] = dimensions
    cmec_json["RESULTS"] = results

    jsondir = os.path.join(wkdir, "json")
    os.makedirs(jsondir, exist_ok=True)
    cmec_file_name = os.path.join(jsondir, jsonname + ".json")
    with open(cmec_file_name, "w") as cmecfile:
        json.dump(cmec_json, cmecfile, indent=2)

    # Register this file under "metrics" in the run's output.json
    metric_desc = {desc[0] + " json": {
        "longname": desc[1],
        "description": desc[2],
        "filename": os.path.relpath(cmec_file_name, start=wkdir)
    }}
    with open(os.path.join(wkdir, output_json), "r") as outfilename:
        tmp = json.load(outfilename)
    tmp["metrics"].update(metric_desc)
    with open(os.path.join(wkdir, output_json), "w") as outfilename:
        json.dump(tmp, outfilename, indent=2)
+ desc is a 3-part list of strings containing 1) unique identifier, + 2) longname 3) description.""" - - - - -def write_single_csv(vardict,modelsin,csvdir,csvname): # create variable array - os.makedirs(os.path.dirname(csvdir), exist_ok=True) - csvfilename = csvdir+"/"+csvname - + csvdir = os.path.join(wkdir,'csv-files') + os.makedirs(csvdir, exist_ok=True) + csvfilename = csvdir+"/"+csvname+".csv" + # If a single line CSV with one model if np.isscalar(modelsin): tmp = np.empty((1,len(vardict))) @@ -121,29 +429,45 @@ def write_single_csv(vardict,modelsin,csvdir,csvname): headerstr=headerstr+","+ii tmp[0,iterix]=vardict[ii] iterix += 1 - + # Create a dummy numpy string array of "labels" with the control name to append as column #1 labels = np.empty((1,1),dtype=" taylor_diagram: Part 1 ") - - rxy = True + + rxy = True rxy@gsnDraw = False rxy@gsnFrame = False if (outlier) then @@ -117,19 +117,19 @@ begin rxy@vpYF = 0.80 end if rxy@vpHeightF = 0.65 - rxy@vpWidthF = 0.65 + rxy@vpWidthF = 0.65 rxy@tmYLBorderOn = False rxy@tmXBBorderOn = False rxy@tiYAxisString = "Standardized Deviations (Normalized)" - rxy@tiYAxisFontHeightF= FontHeightF ; default=0.025 - - rxy@tmXBMode = "Explicit" + rxy@tiYAxisFontHeightF= FontHeightF ; default=0.025 + + rxy@tmXBMode = "Explicit" rxy@tmXBValues = (/0.0,0.25,0.50,0.75,1.00,1.25,1.5,1.75/) ; major tm ; default "OBS" or "REF" ;rxy@tmXBLabels = (/"0.00","0.25","0.50","0.75","REF" ,"1.25","1.50"/) rxy@tmXBLabels = (/" ","0.25","0.50","0.75","PERF" ,"1.25","1.50","1.75"/) - if (rOpts .and. isatt(rOpts,"OneX") ) then ; eg: rOpts@OneX="1.00" + if (rOpts .and. 
isatt(rOpts,"OneX") ) then ; eg: rOpts@OneX="1.00" ;rxy@tmXBLabels = (/"0.00","0.25","0.50","0.75",rOpts@OneX,"1.25","1.50"/) rxy@tmXBLabels = (/" ","0.25","0.50","0.75",rOpts@OneX,"1.25","1.50","1.75"/) end if @@ -143,7 +143,7 @@ begin rxy@tmYLMinorOn = False rxy@tmYLMajorLengthF = rxy@tmXBMajorLengthF rxy@tmYLLabelFontHeightF = FontHeightF - rxy@tmYLMode = "Explicit" + rxy@tmYLMode = "Explicit" rxy@tmYLValues = (/0.0, .25,0.50, 0.75, 1.00, 1.25, 1.5, 1.75/) ; major tm rxy@tmYLLabels = (/"0.00","0.25","0.50","0.75","1.00","1.25","1.50","1.75"/) ;rxy@tmYLLabels = (/" ","0.25","0.50","0.75","1.00","1.25","1.50"/) @@ -163,7 +163,7 @@ begin ; create outer 'correlation axis' npts = 200 ; arbitrary - xx = fspan(xyMin,xyMax,npts) + xx = fspan(xyMin,xyMax,npts) yy = sqrt(xyMax^2 - xx^2 ) ; outer correlation line (xyMax) sLabels = (/"0.0","0.1","0.2","0.3","0.4","0.5","0.6" \ ; correlation labels @@ -171,14 +171,14 @@ begin cLabels = stringtofloat(sLabels) rad = 4.*atan(1.0)/180. angC = acos(cLabels)/rad ; angles: correlation labels - + if (rOpts .and. isatt(rOpts,"tiMainString")) then rxy@tiMainString = rOpts@tiMainString ;rxy@tiMainOffsetYF = 0.015 ; default 0.0 if (isatt(rOpts,"tiMainFontHeightF")) then rxy@tiMainFontHeightF = rOpts@tiMainFontHeightF else - rxy@tiMainFontHeightF = 0.0225 ; default 0.025 + rxy@tiMainFontHeightF = 0.0225 ; default 0.025 end if end if if (rOpts .and. isatt(rOpts,"gsnCenterString")) then @@ -195,22 +195,22 @@ begin dum1 = gsn_add_polyline(wks,taylor,(/0.,xyMax/),(/0., 0. /), rsrRes) xx = fspan(xyMin, xyOne ,npts) ; draw 1.0 standard radius - yy = sqrt(xyOne - xx^2) + yy = sqrt(xyOne - xx^2) rsrRes@gsLineDashPattern = 1 ; dashed line pattern rsrRes@gsLineThicknessF = rxy@xyLineThicknesses(0) ; line thickness dum2 = gsn_add_polyline(wks,taylor,xx,yy, rsrRes) delete(xx) delete(yy) - + if (rOpts .and. 
isatt(rOpts,"stnRad") ) then - rsrRes@gsLineThicknessF = 1 ; rxy@xyLineThicknesses(0) + rsrRes@gsLineThicknessF = 1 ; rxy@xyLineThicknesses(0) nStnRad = dimsizes(rOpts@stnRad) dum3 = new(nStnRad,graphic) do n=0,nStnRad-1 rr = rOpts@stnRad(n) - xx = fspan(xyMin, rr ,npts) - yy = sqrt(rr^2 - xx^2) + xx = fspan(xyMin, rr ,npts) + yy = sqrt(rr^2 - xx^2) dum3(n) = gsn_add_polyline(wks,taylor,xx,yy, rsrRes) end do taylor@$unique_string("dum")$ = dum3 @@ -239,28 +239,28 @@ begin yC = yC + 0.060*sin(rad*angC) txRes = True ; text mods desired - txRes@txFontHeightF = FontHeightF ; match YL + txRes@txFontHeightF = FontHeightF ; match YL txRes@tmYLLabelFont = tmYLLabelFont ; match YL txRes@txAngleF = -50. - if (.not.isatt(rOpts,"drawCorLabel") .or. rOpts@drawCorLabel) then + if (.not.isatt(rOpts,"drawCorLabel") .or. rOpts@drawCorLabel) then nudgecoef = 1.13 ; CMZ, was = 1.0 by default, used to "push" correlation label away from origin dum4 = gsn_add_text(wks,taylor,"Correlation",1.40*1.13,1.20*1.13,txRes) taylor@$unique_string("dum")$ = dum4 end if - txRes@txAngleF = 0.0 + txRes@txAngleF = 0.0 txRes@txFontHeightF = FontHeightF*0.50 ; bit smaller ;;dum0 = gsn_add_text(wks,taylor,"OBSERVED",1.00,0.075,txRes) plRes = True plRes@gsLineThicknessF = 2. - + txRes@txJust = "CenterLeft" ; Default="CenterCenter". 
- txRes@txFontHeightF = FontHeightF ; match YL + txRes@txFontHeightF = FontHeightF ; match YL ;txRes@txBackgroundFillColor = "white" tmEnd = 0.975 - radTM = xyMax*tmEnd ; radius end: major TM + radTM = xyMax*tmEnd ; radius end: major TM xTM = new( 2 , "float") yTM = new( 2 , "float") @@ -272,22 +272,22 @@ begin dum5(i) = gsn_add_text(wks, taylor, sLabels(i),xC(i),yC(i),txRes) ; cor label xTM(0) = xyMax*cos(angC(i)*rad) ; major tickmarks at yTM(0) = xyMax*sin(angC(i)*rad) ; correlation labels - xTM(1) = radTM*cos(angC(i)*rad) + xTM(1) = radTM*cos(angC(i)*rad) yTM(1) = radTM*sin(angC(i)*rad) dum6(i) = gsn_add_polyline(wks,taylor,xTM,yTM,plRes) end do ; minor tm locations - mTM = (/0.05,0.15,0.25,0.35,0.45,0.55,0.65 \ + mTM = (/0.05,0.15,0.25,0.35,0.45,0.55,0.65 \ ,0.75,0.85,0.91,0.92,0.93,0.94,0.96,0.97,0.98 /) angmTM = acos(mTM)/rad ; angles: correlation labels - radmTM = xyMax*(1.-(1.-tmEnd)*0.5) ; radius end: minor TM + radmTM = xyMax*(1.-(1.-tmEnd)*0.5) ; radius end: minor TM dum7 = new(dimsizes(mTM),graphic) do i=0,dimsizes(mTM)-1 ; manually add tm xTM(0) = xyMax*cos(angmTM(i)*rad) ; minor tickmarks yTM(0) = xyMax*sin(angmTM(i)*rad) - xTM(1) = radmTM*cos(angmTM(i)*rad) + xTM(1) = radmTM*cos(angmTM(i)*rad) yTM(1) = radmTM*sin(angmTM(i)*rad) dum7(i) = gsn_add_polyline(wks,taylor,xTM,yTM,plRes) end do @@ -309,7 +309,7 @@ begin end do taylor@$unique_string("dum")$ = dum8 end if - + ; ---------------------------------------------------------------- ; Part 3: ; Concentric about 1.0 on XB axis @@ -321,9 +321,9 @@ begin .and. rOpts@centerDiffRMS) then respl = True ; polyline mods desired respl@gsLineThicknessF = 1.0 ; line thickness - respl@gsLineColor = "grey30" ; line color + respl@gsLineColor = "grey30" ; line color respl@gsLineDashPattern = 2 ; short dash lines - + dx = 0.25 ncon = 4 ; 0.75, 0.50, 0.25, 0.0 npts = 100 ; arbitrary @@ -331,7 +331,7 @@ begin dum9 = new(ncon,graphic) - do n=1,ncon + do n=1,ncon rr = n*dx ; radius from 1.0 [OBS] abscissa xx = 1. 
+ rr*cos(ang) yy = fabs( rr*sin(ang) ) @@ -339,13 +339,13 @@ begin dum9(n-1) = gsn_add_polyline(wks,taylor,xx,yy,respl) end if ;if (n.eq.3) then - ; ;n3 = floattointeger( 0.77*npts ) - ; n3 = floattointeger( 0.9*npts ) + ; ;n3 = floattointeger( 0.77*npts ) + ; n3 = floattointeger( 0.9*npts ) ; dum9(n-1) = gsn_add_polyline(wks,taylor,xx(0:n3),yy(0:n3),respl) ;end if if (n.eq.4) then - ;n4 = floattointeger( 0.61*npts ) - n4 = floattointeger( 0.74*npts ) + ;n4 = floattointeger( 0.61*npts ) + n4 = floattointeger( 0.74*npts ) dum9(n-1) = gsn_add_polyline(wks,taylor,xx(0:n4),yy(0:n4),respl) end if end do @@ -358,7 +358,7 @@ begin ; --------------------------------------------------------------- ; Part 4: ; generic resources that will be applied to all users data points -; of course, these can be changed +; of course, these can be changed ; http://www.ncl.ucar.edu/Document/Graphics/Resources/gs.shtml ; --------------------------------------------------------------- @@ -392,14 +392,14 @@ begin gsRes = True gsRes@gsMarkerThicknessF = gsMarkerThicknessF ; default=1.0 - gsRes@gsMarkerSizeF = gsMarkerSizeF ; Default: 0.007 + gsRes@gsMarkerSizeF = gsMarkerSizeF ; Default: 0.007 ptRes = True ; text options for points ptRes@txJust = "BottomCenter"; Default="CenterCenter". ptRes@txFontThicknessF = 1.2 ; default=1.00 ptRes@txFontHeightF = 0.0125 ; default=0.05 if (rOpts .and. isatt(rOpts,"txMarkerLabelHeight")) then - ptRes@txFontHeightF = rOpts@txMarkerLabelHeight + ptRes@txFontHeightF = rOpts@txMarkerLabelHeight end if markerTxYOffset = 0.0175 ; default @@ -409,14 +409,14 @@ begin dum10 = new((nCase*nVar),graphic) dum11 = dum10 - dum12 = dum10 + dum12 = dum10 ;; Up-front info. for bias labelling. BiasMarkerScale = (/0.9,0.7,1.0,1.4,1.9/) BiasLabels = (/"<5%","5-10%","10-20%","20-50%",">50%"/) BiasLevels = (/5.,10.,20.,50./) - + if (outlier .and. any(CC.lt.0 .or. RATIO.gt.xyMax)) then darr = ndtooned(CC) ; count up the # of outliers darr = 0. 
@@ -431,8 +431,8 @@ begin end if delete(tttt) noutlier = num(darr.eq.1) ; number of outliers - delete(darr) - + delete(darr) + dum14 = new((/noutlier/),graphic) dum15 = new((/noutlier,4/),graphic) @@ -441,15 +441,15 @@ begin stRes@txFontThicknessF = 1.2 ; default=1.00 stRes@txFontHeightF = 0.0125 ; default=0.05 if (rOpts .and. isatt(rOpts,"txFontHeightF")) then - stRes@txFontHeightF = rOpts@txFontHeightF + stRes@txFontHeightF = rOpts@txFontHeightF end if stmarkerTxYOffset= 0.04 dres = True dres@vpWidthF = 0.61 ; dres@vpHeightF = 0.05 -; dres@trYMinF = 0.75 - +; dres@trYMinF = 0.75 + nRowOut = min((/ (noutlier-1)/6 , 3/)) +1 if (rOpts.and.isatt(rOpts,"maxOutlier")) then nRowOut = min((/ (rOpts@maxOutlier-1)/6 , 3/)) +1 @@ -467,7 +467,7 @@ begin dres@tmXBOn = False ; Turn off top tick marks. dres@gsnFrame = False dres@gsnDraw = False - dplot = gsn_blank_plot(wks,dres) + dplot = gsn_blank_plot(wks,dres) end if ;-------------------------------------------------------------------- @@ -487,30 +487,30 @@ begin new_index_up = NhlNewMarker(wks, zmstring, zfontnum, zxoffset, zyoffset, zratio, zsize, zangle) do n=0,nCase-1 - gsRes@gsMarkerIndex = Markers(n) ; marker style + gsRes@gsMarkerIndex = Markers(n) ; marker style gsRes@txFontColor = Colors(n) ; marker color ptRes@txFontColor = Colors(n) ; make font same color - gsRes@gsMarkerColor = Colors(n) + gsRes@gsMarkerColor = Colors(n) do i=0,nVar-1 ; Marker type (index) - if (.not.ismissing(BIAS(n,i))) then + if (.not.ismissing(BIAS(n,i))) then if (BIAS(n,i).ge.0) then ; % ;gsRes@gsMarkerIndex = 7 ; up-triangle gsRes@gsMarkerIndex = new_index_up end if - + ;if (BIAS(n,i).lt.1.) then if (BIAS(n,i).lt.0) then ; % ;gsRes@gsMarkerIndex = 8 ; down-triangle gsRes@gsMarkerIndex = new_index_down end if - + if (abs(BIAS(n,i)).le.BiasLevels(0)) then ; % gsRes@gsMarkerIndex = 4 ; hollow_circle - end if - + end if + scaleMarkerSize = BiasMarkerScale(0) ; For the 'near obs.' classification. if (abs(BIAS(n,i)).gt.BiasLevels(0) .and. 
abs(BIAS(n,i)).le.BiasLevels(1)) then @@ -526,7 +526,7 @@ begin scaleMarkerSize = BiasMarkerScale(4) end if - gsRes@gsMarkerSizeF = gsMarkerSizeF*scaleMarkerSize + gsRes@gsMarkerSizeF = gsMarkerSizeF*scaleMarkerSize ;print(sprintf("%6.2f",BIAS(n,i))+" "+gsRes@gsMarkerIndex \ ; +" "+sprintf("%6.4f",gsRes@gsMarkerSizeF) \ ; +" "+scaleMarkerSize) @@ -536,11 +536,11 @@ begin ; ptRes@txPerimSpaceF = 0.2 ; ptRes@txPerimThicknessF = 0.2 ; ptRes@txBackgroundFillColor = Colors(n) - ; ptRes@txFontColor = "White" + ; ptRes@txFontColor = "White" ;end if - if (outlier .and. (CC(n,i).lt.0 .or. RATIO(n,i).gt.xyMax)) then -; print("X="+X(n,i)+", Y="+Y(n,i)+" RATIO="+RATIO(n,i)+", CC="+CC(n,i)) + if (outlier .and. (CC(n,i).lt.0 .or. RATIO(n,i).gt.xyMax)) then +; print("X="+X(n,i)+", Y="+Y(n,i)+" RATIO="+RATIO(n,i)+", CC="+CC(n,i)) if (scntr.le.5) then yval = .9 xval = .05+(scntr*.17) @@ -550,7 +550,7 @@ begin xval = .05+((scntr-6)*.17) end if if (scntr.ge.12.and.scntr.le.17) then - yval = .38 + yval = .38 xval = .05+((scntr-12)*.17) end if if (scntr.ge.18.and.scntr.le.23) then @@ -559,50 +559,50 @@ begin end if if (scntr.ge.24) then print("Non-fatal error: More than 24 cases have correlations < 0 and/or ratios > 1.65. 
Only 20 cases will be shown beneath the taylor diagram.") - end if + end if - ;stRes@txFontHeightF = 0.01 + ;stRes@txFontHeightF = 0.01 cmzyoffset=-0.05 - dum14(scntr) = gsn_add_polymarker(wks,dplot,xval,yval-.05+cmzyoffset,gsRes) - dum15(scntr,0) = gsn_add_text(wks,dplot,(i+1),xval,yval+stmarkerTxYOffset-.02+cmzyoffset,ptRes) + dum14(scntr) = gsn_add_polymarker(wks,dplot,xval,yval-.05+cmzyoffset,gsRes) + dum15(scntr,0) = gsn_add_text(wks,dplot,(i+1),xval,yval+stmarkerTxYOffset-.02+cmzyoffset,ptRes) dum15(scntr,1) = gsn_add_text(wks,dplot,sprintf("%3.2f",RATIO(n,i)),xval+.08,yval+stmarkerTxYOffset-.02+cmzyoffset,stRes) ;dum15(scntr,2) = gsn_add_text(wks,dplot,"____",xval+.05,yval+stmarkerTxYOffset-.04+cmzyoffset,stRes) - dum15(scntr,3) = gsn_add_text(wks,dplot,sprintf("%3.2f",CC(n,i)),xval+.08,yval+stmarkerTxYOffset-.13+cmzyoffset ,stRes) + dum15(scntr,3) = gsn_add_text(wks,dplot,sprintf("%3.2f",CC(n,i)),xval+.08,yval+stmarkerTxYOffset-.13+cmzyoffset ,stRes) scntr = scntr+1 - else - dum11(n*nVar+i) = gsn_add_polymarker(wks,taylor,X(n,i),Y(n,i),gsRes) + else + dum11(n*nVar+i) = gsn_add_polymarker(wks,taylor,X(n,i),Y(n,i),gsRes) dum12(n*nVar+i) = gsn_add_text(wks,taylor,(i+1),X(n,i),Y(n,i)+markerTxYOffset,ptRes) - end if - + end if + ptRes@txFontColor = Colors(n) ptRes@txBackgroundFillColor = "Transparent" end if ; .not.ismissing(BIAS(n,1)) end do - end do + end do if (outlier) then amres0 = True amres0@amJust = "BottomCenter" - amres0@amParallelPosF = -0.03 - amres0@amOrthogonalPosF = 0.65+((nRowOut-1)*.08) + amres0@amParallelPosF = -0.03 + amres0@amOrthogonalPosF = 0.65+((nRowOut-1)*.08) annoid0 = gsn_add_annotation(taylor,dplot,amres0) ; add legend to plot - end if + end if ; --------------------------------------------------------------- ; Part 4a: add bias legend ; --------------------------------------------------------------- ; Add bias sizing key to plot - + lgres = True lgres@lgPerimOn = False ; turn off perimeter lgres@lgMonoMarkerSize = False 
lgres@lgMonoMarkerColor = True - lgres@vpWidthF = 0.12 + lgres@vpWidthF = 0.12 lgres@vpHeightF = 0.15 lgres@lgMarkerColor = "Black" ; colors of markers - lgres@lgMarkerIndexes = (/4,new_index_down,new_index_down,new_index_down,new_index_down/) ; Markers + lgres@lgMarkerIndexes = (/4,new_index_down,new_index_down,new_index_down,new_index_down/) ; Markers lgres@lgMarkerSizes = BiasMarkerScale*gsMarkerSizeF ; Marker size lgres@lgItemType = "Markers" ; draw markers only lgres@lgLabelFontHeightF = 0.07 ; font height of legend case labels @@ -610,35 +610,35 @@ begin ; lgres@lgTitleOffsetF = lgres@lgTitleString = " - / +" lgres@lgLabelsOn = False - - + + ; Down triangles - no text lbid = gsn_create_legend(wks,5,BiasLabels,lgres) - + amres = True - amres@amParallelPosF = 0.35 ; x axis (positive to the right) - amres@amOrthogonalPosF = -0.38 ; y axis (positive down) + amres@amParallelPosF = 0.35 ; x axis (positive to the right) + amres@amOrthogonalPosF = -0.38 ; y axis (positive down) annoid1 = gsn_add_annotation(taylor,lbid,amres) ; add legend to plot -; Up triangles - with text +; Up triangles - with text lgres@lgLabelsOn = True lgres@lgTitleString = " Bias" lgres@lgMarkerIndexes(1:4) = (/new_index_up,new_index_up,new_index_up,new_index_up/) lbid = gsn_create_legend(wks,5,BiasLabels,lgres) - - amres@amParallelPosF = amres@amParallelPosF+0.07 + + amres@amParallelPosF = amres@amParallelPosF+0.07 annoid2 = gsn_add_annotation(taylor,lbid,amres) ; add legend to plot delete(lgres) ; --------------------------------------------------------------- -; Part 5: add case legend and variable labels +; Part 5: add case legend and variable labels ; --------------------------------------------------------------- ;print("============> taylor_diagram: Part 5 ") - if (rOpts .and. isatt(rOpts,"caseLabels")) then + if (rOpts .and. 
isatt(rOpts,"caseLabels")) then if (isatt(rOpts,"caseLabelsXYloc")) then caseXloc = rOpts@caseLabelsXYloc(0) caseYloc = rOpts@caseLabelsXYloc(1) @@ -650,12 +650,12 @@ begin if (isatt(rOpts,"caseLabelsFontHeightF")) then caseLabelsFontHeightF = rOpts@caseLabelsFontHeightF else - caseLabelsFontHeightF = 0.05 + caseLabelsFontHeightF = 0.05 end if lgres = True lgres@lgMarkerColors = Colors ; colors of markers - lgres@lgMarkerIndexes = (/16,16,16,16/) ; Markers + lgres@lgMarkerIndexes = (/16,16,16,16/) ; Markers lgres@lgMarkerSizeF = gsMarkerSizeF ; Marker size lgres@lgItemType = "Markers" ; draw markers only lgres@lgLabelFontHeightF = caseLabelsFontHeightF ; font height of legend case labels @@ -669,19 +669,19 @@ begin lgres@lgLabelPosition = "Center" if (isatt(rOpts,"legendHeight")) then lgres@vpHeightF = rOpts@legendHeight - else + else lgres@vpHeightF = 0.030*nCase ; height of legend (NDC) end if lgres@lgPerimOn = False ; turn off perimeter lbid = gsn_create_legend(wks,nCase," "+rOpts@caseLabels,lgres) - + amres = True - amres@amParallelPosF = 0.28 - amres@amOrthogonalPosF = -0.40 + amres@amParallelPosF = 0.28 + amres@amOrthogonalPosF = -0.40 annoid3 = gsn_add_annotation(taylor,lbid,amres) ; add legend to plot end if - if (rOpts .and. isatt(rOpts,"varLabels")) then + if (rOpts .and. isatt(rOpts,"varLabels")) then nVar = dimsizes(rOpts@varLabels) if (isatt(rOpts,"varLabelsFontHeightF")) then @@ -694,7 +694,7 @@ begin txres@txFontHeightF = varLabelsFontHeightF txres@txJust = "CenterLeft" ; justify to the center left - delta_y = 0.075 + delta_y = 0.075 if (rOpts .and. 
isatt(rOpts,"varLabelsYloc")) then ys = rOpts@varLabelsYloc ; user specified else @@ -703,7 +703,7 @@ begin xs=0.10 - do i = 1,nVar + do i = 1,nVar if (i.eq.1) then dum13 = new(nVar,graphic) end if @@ -731,18 +731,18 @@ begin taylor@$unique_string("dum")$ = dum14 ; outlier polymarker taylor@$unique_string("dum")$ = dum15 ; outlier text end if - + ;print("============> taylor_diagram: Part 6 ") if (.not.isatt(rOpts,"taylorDraw") .or. \ - (isatt(rOpts,"taylorDraw") .and. rOpts@taylorDraw)) then + (isatt(rOpts,"taylorDraw") .and. rOpts@taylorDraw)) then draw(taylor) end if ;print("============> taylor_diagram: Part 7 ") if (.not.isatt(rOpts,"taylorFrame") .or. \ - (isatt(rOpts,"taylorFrame") .and. rOpts@taylorFrame)) then + (isatt(rOpts,"taylorFrame") .and. rOpts@taylorFrame)) then frame(wks) end if @@ -761,8 +761,11 @@ begin f = addfile(ncfile,"r") -out_type="pdf" -out_dir="./fig/taylor/" +wkdir=getenv("CMEC_WK_DIR") +if (any(ismissing(wkdir))) then + wkdir="." +end if +out_dir=wkdir+"/fig/taylor/" system("mkdir -p "+out_dir) strbasin=tostring(f@strbasin) @@ -799,7 +802,7 @@ res@Colors = (/"blue","red"/) res@varLabels = taylor_models res@centerDiffRMS = True res@stnRad = (/ 0.5,1.5 /) ; additional standard radii -res@ccRays = (/ 0.6,0.9 /) ; correlation rays +res@ccRays = (/ 0.6,0.9 /) ; correlation rays res@varLabelsYloc = 1.7 ;res@drawCorLabel=False plot = taylor_diagram_cam(wks, taylor_rat, taylor_cco, taylor_bia2, res) diff --git a/cymep/plotting/plot-temporal.ncl b/cymep/plotting/plot-temporal.ncl index a7728c0..657c23c 100644 --- a/cymep/plotting/plot-temporal.ncl +++ b/cymep/plotting/plot-temporal.ncl @@ -1,7 +1,10 @@ f = addfile(ncfile,"r") -out_type="pdf" -out_dir="./fig/line/" +wkdir=getenv("CMEC_WK_DIR") +if (any(ismissing(wkdir))) then + wkdir = "." 
+end if +out_dir=wkdir+"/fig/line/" system("mkdir -p "+out_dir) models = chartostring(f->model_names) @@ -28,17 +31,17 @@ lineunitsstr=(/"number","days","10~S~-4~N~ kn~S~2~N~","10~S~-4~N~ kn~S~2~N~","nu do mm = 0,dimsizes(linepltvarsstr)-1 DOPLOT=True - + toPlot=f->$linepltvars(mm)$ if (isStrSubset(linepltvars(mm),"pm_")) then print("Seasonal plots!") linefilelabstr="seasonal" - else + else print("Interannual plots!") linefilelabstr="interann" end if - + plot_dims = dimsizes(toPlot) if (plot_dims(1) .le. 1) then print("ONLY 1 MONTH/YEAR, CANNOT PLOT LINE PLOT") @@ -46,7 +49,7 @@ do mm = 0,dimsizes(linepltvarsstr)-1 end if if (DOPLOT) then - wks = gsn_open_wks("pdf",out_dir+"/"+linefilelabstr+"_"+tostring(linepltvarsstr(mm))+"."+strbasin+"_"+filename) + wks = gsn_open_wks(out_type,out_dir+"/"+linefilelabstr+"_"+tostring(linepltvarsstr(mm))+"."+strbasin+"_"+filename) ; draw xy curves res = True ; plot mods desired @@ -59,19 +62,19 @@ do mm = 0,dimsizes(linepltvarsstr)-1 res@xyLineColors = linecolors ; we have to use linecolors here since missing data included res@tiYAxisString = linepltvarsstr(mm)+" ("+lineunitsstr(mm)+")" - if (isStrSubset(linepltvarsstr(mm),"Month")) then + if (isStrSubset(linepltvarsstr(mm),"Month")) then res@tiMainString = linepltvarsstr(mm)+" seasonal cycle" res@tiXAxisString = "Month" res@trXMinF = 1 res@trXMaxF = 12 plot = gsn_csm_xy (wks,monArr,toPlot,res) ; create plot - else - res@tiMainString = linepltvarsstr(mm)+" interannual cycle" + else + res@tiMainString = linepltvarsstr(mm)+" interannual cycle" res@tiXAxisString = "Year" res@trXMinF = styr res@trXMaxF = enyr plot = gsn_csm_xy (wks,yearArr,toPlot,res) ; create plot - end if + end if ; Build legend textres=True diff --git a/settings.json b/settings.json new file mode 100644 index 0000000..0ab8675 --- /dev/null +++ b/settings.json @@ -0,0 +1,54 @@ +{ + "obslist": { + "obs_name": "IBTrACS", + "frequency": "6hr", + "version": "4", + "description": "TC track data from the International 
Best Track Archive for Climate Stewardship (IBTrACS). See website at https://www.ncdc.noaa.gov/ibtracs/" + }, + "settings": { + "async": "none", + "description": "A software suite for the objective evaluation of tropical cyclones in gridded climate data", + "driver": "coastal-storm-metrics.sh", + "name": "CyMeP", + "long_name": "Cyclone Metrics Package", + "runtime": {"CyMeP": "1.0", "python3": ["numpy", "pandas", "scipy", "netCDF4"], "ncl": "6.6.2"} + }, + "varlist": { + "slp": { + "name": "sea level pressure", + "units": "Pa", + "frequency": "6hr" + }, + "wind": { + "name": "wind speed", + "units": "m s-1", + "frequency": "6hr" + }, + "phis": { + "name": "surface geopotential", + "units": "m2 s-2", + "frequency": "6hr" + } + }, + "coordinates": { + "min_time_range": "6hr", + "max_time_range": "6hr", + "min_resolution": "any", + "max_resolution": "any" + }, + "default_parameters": { + "basin": -1, + "csvfilename": "rean_configs.csv", + "gridsize": 8.0, + "styr": 1980, + "enyr": 2018, + "stmon": 1, + "enmon": 12, + "truncate_years": false, + "THRESHOLD_ACE_WIND": -1.0, + "THRESHOLD_PACE_PRES": -100.0, + "do_special_filter_obs": true, + "do_fill_missing_pw": true, + "do_defineMIbypres": false + } +} From cff7ede6aa0606d91a35cf96e312476b60afbcd9 Mon Sep 17 00:00:00 2001 From: Ana Ordonez Date: Mon, 14 Jun 2021 13:18:16 -0700 Subject: [PATCH 2/9] add env file --- cymep.yml | 10 ++++++++++ 1 file changed, 10 insertions(+) create mode 100644 cymep.yml diff --git a/cymep.yml b/cymep.yml new file mode 100644 index 0000000..81d8067 --- /dev/null +++ b/cymep.yml @@ -0,0 +1,10 @@ +name: _CMEC_cymep +channels: + - conda-forge +dependencies: + - ncl + - netCDF4 + - numpy + - pandas + - python>=3.6 + - scipy From ca9af74d3418cf101796f54f9f32e1adeee6e675 Mon Sep 17 00:00:00 2001 From: Ana Ordonez Date: Mon, 14 Jun 2021 13:20:19 -0700 Subject: [PATCH 3/9] add env activation --- coastal-storm-metrics.sh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/coastal-storm-metrics.sh 
b/coastal-storm-metrics.sh index e95e437..ef82fe3 100755 --- a/coastal-storm-metrics.sh +++ b/coastal-storm-metrics.sh @@ -1,5 +1,7 @@ #!/bin/bash # This is the driver script for running CyMeP via cmec-driver +source $CONDA_SOURCE +conda activate $CONDA_ENV_ROOT/_CMEC_cymep echo "Running Cyclone Metrics Package" cymep_log=${CMEC_WK_DIR}/CyMeP.log.txt From d575a892c7b71514817beb8aa68cea28620e0412 Mon Sep 17 00:00:00 2001 From: Ana Ordonez Date: Mon, 14 Jun 2021 13:29:43 -0700 Subject: [PATCH 4/9] update cmec-driver instructions --- README.md | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index d762972..75c41ff 100644 --- a/README.md +++ b/README.md @@ -136,11 +136,16 @@ This will produce a suite of figures in various subfolders within `./fig/`. ### Run with CMEC driver -An alternative workflow is available with [cmec-driver](https://github.com/cmecmetrics/cmec-driver). The workflow is: +An alternative workflow is available with [cmec-driver](https://github.com/cmecmetrics/cmec-driver) and conda. The workflow is: 1. Clone the coastal-storm-metrics repo. -2. Install dependencies. +2. Use conda to dependencies in an environment called "_CMEC_cymep". This can be done using the provided yml file: +`conda env create --file cymep.yml` 3. From the `cmec-driver` directory, register CyMeP in the cmec library: -`python cmec-driver.py register ` +`python cmec-driver.py register ` + - If you have not run cmec-driver before, you must also register your conda installation information: + `python cmec-driver.py setup -conda_source -env_dir ` + For a standard anaconda or miniconda installation, this might look like: + `python cmec-driver.py setup -conda_source ~/miniconda3/etc/profile.d/conda.sh -env_dir ~/miniconda3/envs/` 4. Add TempestExtremes ASCII trajectories to `cmec-driver/model`. - For testing, copy `cymep/trajs/*` to `cmec-driver/model` 5. Create configuration csv in cmec-driver/model. 
@@ -149,4 +154,4 @@ An alternative workflow is available with [cmec-driver](https://github.com/cmecm - For testing, use the default settings. 7. Run CyMeP module from cmec-driver: `python cmec-driver.py run model/ output/ CyMeP` -8. Open `cmec-driver/output/CyMeP/index.html` to view results. +8. Open `/output/CyMeP/index.html` to view results. From b73a68834ab62b675ac0d9b52a5b620648f1b189 Mon Sep 17 00:00:00 2001 From: Ana Ordonez Date: Mon, 14 Jun 2021 14:14:50 -0700 Subject: [PATCH 5/9] move html into function --- cymep/functions/output_templates/README.md | 5 ---- .../output_templates/html_template.txt | 12 --------- cymep/functions/write_cmec.py | 27 ++++++++++++++++--- 3 files changed, 23 insertions(+), 21 deletions(-) delete mode 100644 cymep/functions/output_templates/README.md delete mode 100644 cymep/functions/output_templates/html_template.txt diff --git a/cymep/functions/output_templates/README.md b/cymep/functions/output_templates/README.md deleted file mode 100644 index 2b5ee6c..0000000 --- a/cymep/functions/output_templates/README.md +++ /dev/null @@ -1,5 +0,0 @@ -### Output Templates - -These files are used to generate the CMEC html landing page and output description JSON. - -If new outputs are added to CyMeP, a description should be added to the template output_desc.json. Figures are organized by folder structure under the key "fig". CSV files are found under "csv-files" and divided by "mean" and "metric" versions. The NetCDF file is under "netcdf-files". An additional "statistics" section holds information for generating descriptions for the metrics in the CSV files. If the new outputs are located in different folders or have a different structure than existing outputs, additional code will likely be needed in ../write_cmec.py as well. 
diff --git a/cymep/functions/output_templates/html_template.txt b/cymep/functions/output_templates/html_template.txt deleted file mode 100644 index 30d41b8..0000000 --- a/cymep/functions/output_templates/html_template.txt +++ /dev/null @@ -1,12 +0,0 @@ - - -CyMeP Results -

CyMeP metrics and figures

-

Metrics files

-
$json_metric -
-

Plots

-

$title

-

$plot

- - \ No newline at end of file diff --git a/cymep/functions/write_cmec.py b/cymep/functions/write_cmec.py index a831b0e..f7ae3e9 100644 --- a/cymep/functions/write_cmec.py +++ b/cymep/functions/write_cmec.py @@ -15,7 +15,6 @@ from string import Template import sys import numpy as np -import xarray as xr def define_figures(): """Store descriptions and longnames of the CyMeP output figures, @@ -105,6 +104,28 @@ def define_figures(): } return figure_description +def html_template(): + html_template = [ + "", + "", + "CyMeP Results", + "

CyMeP metrics and figures

", + "

The Cyclone Metrics Package provides a comprehensive set of ", + "metrics for comparing tropical cyclones in gridded climate data.

" + "

Reference: Zarzycki, C. M., Ullrich, P. A., and Reed, K. A. (2021). " + "Metrics for evaluating tropical cyclones in climate data. " + "Journal of Applied Meteorology and Climatology. " + "doi: 10.1175/JAMC-D-20-0149.1.

" + "

Metrics files

", + "
$json_metric", + "
", + "

Plots

", + "

$title

", + "

$plot

", + "", + ""] + return html_template + def populate_filename(data_dict, filename): """Combine filename and description info for output.json.""" output_dict = {} @@ -218,9 +239,7 @@ def populate_html_figures(html_lines, fig_dict): html_list["fig"]["spatial"] = new_spatial_list # Generate html page from template - html_dir = os.path.join(codedir,"cymep/functions/output_templates/html_template.txt") - with open(html_dir, "r") as html_template: - html_lines = html_template.readlines() + html_lines = html_template() # Add links to all the output images html_lines = populate_html_figures(html_lines, html_list["fig"]) From f7de452f6c47fe15de52d19aa3ba57390c832ac3 Mon Sep 17 00:00:00 2001 From: Ana Ordonez Date: Mon, 14 Jun 2021 14:17:46 -0700 Subject: [PATCH 6/9] remove double import --- cymep/cymep.py | 1 - 1 file changed, 1 deletion(-) diff --git a/cymep/cymep.py b/cymep/cymep.py index 34de6d5..2622055 100644 --- a/cymep/cymep.py +++ b/cymep/cymep.py @@ -5,7 +5,6 @@ import json import scipy.stats as sps from netCDF4 import Dataset -import json sys.path.insert(0, './functions') from getSettings import * From 307f1058c331c6b68509b8561884a4a5cacc58fc Mon Sep 17 00:00:00 2001 From: Ana Ordonez Date: Mon, 14 Jun 2021 14:25:47 -0700 Subject: [PATCH 7/9] comments and minor changes --- cymep/cymep.py | 2 ++ cymep/functions/write_spatial.py | 6 +++--- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/cymep/cymep.py b/cymep/cymep.py index 2622055..2f98af2 100644 --- a/cymep/cymep.py +++ b/cymep/cymep.py @@ -512,6 +512,8 @@ # Write out primary stats files csv_base = os.path.splitext(csvfilename)[0]+'_'+strbasin +# desc is for cmec-driver 'output.json' metadata file: +# desc = [short_name, long_name, description] fname = 'metrics_'+csv_base+'_spatial_corr' desc = ['spatial_corr','spatial correlation','Statistics for spatial correlation table'] write_single_csv(rxydict,strs,wkdir,fname,desc,cmec_driver) diff --git a/cymep/functions/write_spatial.py 
b/cymep/functions/write_spatial.py index e740507..86ce57a 100644 --- a/cymep/functions/write_spatial.py +++ b/cymep/functions/write_spatial.py @@ -132,7 +132,7 @@ def create_output_json(wkdir,modeldir): with open(outfilepath,"w") as outfilename: json.dump(out_json, outfilename, indent=2) -def write_spatial_netcdf(spatialdict,permondict,peryrdict,taydict,modelsin,nyears,nmonths,latin,lonin,globaldict,wkdir,cmec=False): +def write_spatial_netcdf(spatialdict,permondict,peryrdict,taydict,modelsin,nyears,nmonths,latin,lonin,globaldict,wkdir='.',cmec=False): # Convert modelsin from pandas to list modelsin=modelsin.tolist() @@ -410,9 +410,9 @@ def write_dict_csv(vardict,modelsin): np.savetxt(csvfilename, tmp, delimiter=",", fmt="%s") -def write_single_csv(vardict,modelsin,wkdir,csvname,desc,cmec=False): +def write_single_csv(vardict,modelsin,wkdir,csvname,desc='',cmec=False): """Write metrics to csv file. If cmec=True, also write metrics to JSON. - desc is a 3-part list of strings containing 1) unique identifier, + The variable 'desc' is a 3-part list of strings containing 1) unique identifier, 2) longname 3) description.""" # create variable array From 544f40983196ce9c6aeb633874a3c7efc317c008 Mon Sep 17 00:00:00 2001 From: Ana Ordonez Date: Thu, 2 Dec 2021 16:19:25 -0800 Subject: [PATCH 8/9] update cmec-driver instructions --- README.md | 29 +++++++++++++++-------------- 1 file changed, 15 insertions(+), 14 deletions(-) diff --git a/README.md b/README.md index 75c41ff..2ee0d2d 100644 --- a/README.md +++ b/README.md @@ -136,22 +136,23 @@ This will produce a suite of figures in various subfolders within `./fig/`. ### Run with CMEC driver -An alternative workflow is available with [cmec-driver](https://github.com/cmecmetrics/cmec-driver) and conda. The workflow is: -1. Clone the coastal-storm-metrics repo. -2. Use conda to dependencies in an environment called "_CMEC_cymep". 
This can be done using the provided yml file: +An alternative workflow is available with [cmec-driver](https://github.com/cmecmetrics/cmec-driver) and conda. This workflow uses conda to manage Python environments. Follow the cmec-driver [installation instructions](https://github.com/cmecmetrics/cmec-driver#cmec-driver) to install cmec-driver in a conda environment. Then use the following workflow: +1. Clone the coastal-storm-metrics repository. +2. Use conda to install dependencies in an environment called "_CMEC_cymep". This can be done using the provided yml file: `conda env create --file cymep.yml` -3. From the `cmec-driver` directory, register CyMeP in the cmec library: -`python cmec-driver.py register ` +3. Register CyMeP in the cmec library: +`cmec-driver register <path to coastal-storm-metrics>` - If you have not run cmec-driver before, you must also register your conda installation information: - `python cmec-driver.py setup -conda_source -env_dir ` + `cmec-driver setup --conda_source <path to conda source script> --env_dir <path to conda environment directory>` For a standard anaconda or miniconda installation, this might look like: - `python cmec-driver.py setup -conda_source ~/miniconda3/etc/profile.d/conda.sh -env_dir ~/miniconda3/envs/` + `cmec-driver setup --conda_source ~/miniconda3/etc/profile.d/conda.sh --env_dir ~/miniconda3/envs/` +4. Create a "cmec-driver" folder *outside* of the coastal-storm-metrics repo. In the cmec-driver folder, create two subfolders: "model" and "output". +5. Add TempestExtremes ASCII trajectories to `cmec-driver/model`. - For testing, copy `cymep/trajs/*` to `cmec-driver/model` -5. Create configuration csv in cmec-driver/model. - - For testing, copy `cymep/config-lists/rean_configs.csv` to `cmec-driver/model`. -6. Edit user settings in cmec-driver/config/cmec.json. +6. Create configuration csv in cmec-driver/model. - For testing, copy `cymep/config-lists/rean_configs.csv` to `cmec-driver/model/rean_configs.csv`. +7. 
Edit user settings in cmec-driver/config/cmec.json. - For testing, use the default settings. -7. Run CyMeP module from cmec-driver: -`python cmec-driver.py run model/ output/ CyMeP` -8. Open `/output/CyMeP/index.html` to view results. +8. Run CyMeP module from the cmec-driver directory: +`cmec-driver run model/ output/ CyMeP` +9. Open `<cmec-driver folder>/output/CyMeP/index.html` to view results. From 049f0a71bd538c5832e75428f745f16ca037113b Mon Sep 17 00:00:00 2001 From: Ana Ordonez Date: Thu, 2 Dec 2021 16:21:05 -0800 Subject: [PATCH 9/9] update cmec.json path --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 2ee0d2d..c7ccdf0 100644 --- a/README.md +++ b/README.md @@ -151,7 +151,7 @@ An alternative workflow is available with [cmec-driver](https://github.com/cmecm - For testing, copy `cymep/trajs/*` to `cmec-driver/model` 6. Create configuration csv in cmec-driver/model. - For testing, copy `cymep/config-lists/rean_configs.csv` to `cmec-driver/model/rean_configs.csv`. -7. Edit user settings in cmec-driver/config/cmec.json. +7. Edit user settings in ~/.cmec/cmec.json. - For testing, use the default settings. 8. Run CyMeP module from the cmec-driver directory: `cmec-driver run model/ output/ CyMeP`