Commit

Added timing and debugging info for demo scripts
abodeuis committed Jun 7, 2024
1 parent c01c503 commit 892f481
Showing 4 changed files with 40 additions and 19 deletions.
7 changes: 5 additions & 2 deletions tests/test_usgsDemo.py
@@ -46,6 +46,7 @@ def test_mock_map(self):
true_segmentations='tests/data/true_segmentations',
map_images='tests/data/map_images',
legends='tests/data/legends',
log='/dev/null',
min_valid_range=5,
difficult_weight=0.7,
set_false_as='hard',
@@ -69,11 +70,12 @@ def test_mock_map(self):
# true_segmentations='tests/uncommited_data/true_segmentations',
# map_images='tests/uncommited_data/map_images',
# legends='tests/uncommited_data/legends',
# log='/dev/null',
# min_valid_range=0.1,
# difficult_weight=0.7,
# set_false_as='hard',
# color_range=4)
# expected_csv_path = 'tests/data/MT_OldBaldy_results.csv'
# expected_csv_path = 'tests/uncommited_data/MT_OldBaldy_results.csv'

# self.run_test_usgsDemo(args, expected_csv_path, log)
# log.info('Test passed successfully')
@@ -92,11 +94,12 @@ def test_mock_map(self):
# true_segmentations='tests/uncommited_data/true_segmentations',
# map_images='tests/uncommited_data/map_images',
# legends='tests/uncommited_data/legends',
# log='/dev/null',
# min_valid_range=0.1,
# difficult_weight=0.7,
# set_false_as='hard',
# color_range=4)
# expected_csv_path = 'tests/data/CA_Elsinore_results.csv'
# expected_csv_path = 'tests/uncommited_data/CA_Elsinore_results.csv'

# self.run_test_usgsDemo(args, expected_csv_path, log)
# log.info('Test passed successfully')
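The active test above builds its demo arguments directly rather than parsing a command line. A minimal sketch of that pattern, assuming an argparse.Namespace with the field names visible in this diff (the pred_segmentations field and the example path are assumptions drawn from how main() consumes args elsewhere in the commit, not from these test lines):

from argparse import Namespace

# Hypothetical test-argument construction; field names mirror the diff.
# log='/dev/null' (new in this commit) discards file logging during tests.
args = Namespace(
    pred_segmentations=['tests/data/pred_segmentations/example_poly.tif'],  # assumed list of prediction files
    true_segmentations='tests/data/true_segmentations',
    map_images='tests/data/map_images',
    legends='tests/data/legends',
    log='/dev/null',
    min_valid_range=5,
    difficult_weight=0.7,
    set_false_as='hard',
    color_range=4,
)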
5 changes: 3 additions & 2 deletions tests/test_validationDemo.py
@@ -78,7 +78,7 @@ def test_mock_map(self):
# log='/dev/null',
# verbose=False,
# feedback=False)
# expected_csv_path = 'tests/data/MT_OldBaldy_results.csv'
# expected_csv_path = 'tests/uncommited_data/MT_OldBaldy_results.csv'

# self.run_test_validationDemo(args, expected_csv_path, log)
# log.info('Test passed successfully')
@@ -104,7 +104,8 @@ def test_mock_map(self):
# log='/dev/null',
# verbose=False,
# feedback=False)
# expected_csv_path = 'tests/data/CA_Elsinore_results.csv'
# expected_csv_path = 'tests/uncommited_data/CA_Elsinore_results.csv'

# self.run_test_validationDemo(args, expected_csv_path, log)
# log.info('Test passed successfully')
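One portability note on the '/dev/null' value these tests use: that path only exists on POSIX systems. A hedged alternative, should the tests ever need to run on Windows, is the stdlib constant that resolves to the right sink on each platform:

import os

# os.devnull is '/dev/null' on POSIX and 'nul' on Windows, so it is the
# portable way to discard file logging in these tests.
log_path = os.devnull  # hypothetical replacement for the literal '/dev/null'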

25 changes: 23 additions & 2 deletions usgsDemo.py
@@ -1,7 +1,12 @@
import os
import logging
import argparse
import pandas as pd
from usgs_grading_metric import feature_f_score
from time import time
from cmaas_utils.logging import start_logger

LOGGER_NAME = 'USGS_VALIDATION_DEMO'

def parse_command_line():
"""Runs Command line argument parser for pipeline. Exit program on bad arguments. Returns struct of arguments"""
@@ -88,6 +93,9 @@ def parse_directory(path : str) -> str:
type=int,
default=4,
help='The range of color variation to consider for the legend color. Defaults to 4')
optional_args.add_argument('--log',
default='logs/Latest.log',
help='Option to set the file logging will output to. Defaults to "logs/Latest.log"')

# Flags
flag_group = parser.add_argument_group('Flags', '')
@@ -100,13 +108,18 @@ def parse_directory(path : str) -> str:
return args

def main(args):
main_time = time()
global log
log = start_logger(LOGGER_NAME, args.log, log_level=logging.DEBUG, console_log_level=logging.INFO, writemode='w')

# Create output directory if it does not exist
if not os.path.exists(args.output):
os.makedirs(args.output)

results_df = pd.DataFrame(columns=['Map', 'Feature', 'F1 Score', 'Precision', 'Recall'])

print(f'Running USGS Grading Metric on {len(args.pred_segmentations)} files')
log.info(f'Running USGS Grading Metric on {len(args.pred_segmentations)} files')
skipped_files = []
potential_map_names = [os.path.splitext(m)[0] for m in os.listdir(args.map_images) if m.endswith('.tif')]
for pred_filepath in args.pred_segmentations:
map_name = [m for m in potential_map_names if m in pred_filepath][0]
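The map name here is recovered by substring matching against the .tif files in args.map_images, and [...][0] raises an IndexError if nothing matches. A hedged defensive variant (a suggestion under that assumption, not what the script does):

def resolve_map_name(pred_filepath, potential_map_names):
    # Hypothetical helper: same substring match as above, but with an
    # explicit no-match guard instead of indexing [0] into an empty list.
    return next((m for m in potential_map_names if m in pred_filepath), None)

# Usage sketch inside the loop:
# map_name = resolve_map_name(pred_filepath, potential_map_names)
# if map_name is None:
#     log.warning(f'No map image matches {pred_filepath}, skipping grading')
#     skipped_files.append(pred_filepath)
#     continue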
@@ -115,12 +128,20 @@ def main(args):
true_filepath = os.path.join(args.true_segmentations, os.path.basename(pred_filepath))
json_filepath = os.path.join(args.legends, map_name + '.json')

if not os.path.exists(true_filepath) or not os.path.exists(map_filepath) or not os.path.exists(json_filepath):
log.warning(f'Could not find the necessary files for {pred_filepath}, skipping grading')
skipped_files.append(pred_filepath)
continue

result = feature_f_score(map_filepath, pred_filepath, true_filepath, legend_json_path=json_filepath, min_valid_range=args.min_valid_range, difficult_weight=args.difficult_weight, set_false_as=args.set_false_as, color_range=args.color_range)
results_df.loc[len(results_df)] = {'Map' : map_name, 'Feature' : feature_name, 'F1 Score' : result['f_score'], 'Precision' : result['precision'], 'Recall' : result['recall']}

csv_path = os.path.join(args.output, 'usgsDemo_results.csv')
print(f'Finished grading, saving results to {csv_path}')
results_df.to_csv(csv_path)
if len(skipped_files) > 0:
log.warning(f'Skipped grading on {len(skipped_files)} files. Could not find the necessary files for the following: {skipped_files}')
log.info(f'Finished grading, saving results to {csv_path}, Runtime was {time()-main_time} seconds')


if __name__ == '__main__':
args = parse_command_line()
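The guard added in this file checks all three companion files in one boolean expression, so the warning cannot say which file was absent. A small sketch of a variant that names the missing inputs, under the same path layout (find_missing_inputs is a hypothetical helper, not part of the commit):

import os

def find_missing_inputs(map_filepath, true_filepath, json_filepath):
    # Hypothetical refinement of the exists-check added in this commit:
    # return a description of each companion file that is absent.
    required = {'map image': map_filepath,
                'true segmentation': true_filepath,
                'legend json': json_filepath}
    return [f'{kind} ({path})' for kind, path in required.items()
            if not os.path.exists(path)]

# Usage sketch:
# missing = find_missing_inputs(map_filepath, true_filepath, json_filepath)
# if missing:
#     log.warning(f'Missing {", ".join(missing)} for {pred_filepath}, skipping grading')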
22 changes: 9 additions & 13 deletions validationDemo.py
@@ -3,8 +3,8 @@
import argparse
import numpy as np
import pandas as pd
from time import time
from rich.progress import track
#from rasterio.features import sieve

import cmaas_utils.io as io
from cmaas_utils.types import MapUnitType
@@ -94,11 +94,6 @@ def parse_feature(feature_string : str) -> str:
optional_args.add_argument('-o', '--output',
default='results',
help='Directory to write the validation feedback to. Defaults to "results"')
# optional_args.add_argument('-f','--feature_type',
# type=parse_feature,
# default=MapUnitType.POLYGON,
# help='Type of features that will be graded on, will be used if the feature type can\'t be \
# detected from the file name. Available features are Point or Polygon')
optional_args.add_argument('--log',
default='logs/Latest.log',
help='Option to set the file logging will output to. Defaults to "logs/Latest.log"')
@@ -138,6 +133,7 @@ def parse_feature(feature_string : str) -> str:
return args

def main(args):
main_time = time()
# Start logger
if args.verbose:
global FILE_LOG_LEVEL, STREAM_LOG_LEVEL
@@ -149,7 +145,6 @@ def parse_feature(feature_string : str) -> str:
# Log info statement to console even if in warning only mode
log.handlers[1].setLevel(logging.INFO)
log.info(f'Running pipeline on {os.uname()[1]} with following parameters:\n' +
# f'\tFeature type : {args.feature_type}\n' +
f'\tPred Data : {args.pred_segmentations}\n' +
f'\tTrue Data : {args.true_segmentations}\n' +
f'\tMaps : {args.map_images}\n' +
@@ -169,22 +164,22 @@ def main(args):
log.info(f'Starting grading of {len(args.pred_segmentations)} files')
last_map_filepath = None
last_legend_filepath = None
skipped_files = []
potential_map_names = [os.path.splitext(m)[0] for m in os.listdir(args.map_images) if m.endswith('.tif')]
pbar = track(args.pred_segmentations)
logging_handler = changeConsoleHandler(log, RichHandler(level=STREAM_LOG_LEVEL))
for pred_filepath in pbar:
feature_name = os.path.basename(os.path.splitext(pred_filepath)[0])
feature_type = MapUnitType.from_str(feature_name.split('_')[-1])
# if feature_type == MapUnitType.UNKNOWN:
# feature_type = args.feature_type
log.info(f'Processing {feature_name}')

# Load data
pred_image, _, _ = io.loadGeoTiff(pred_filepath)
try:
true_image, _, _ = io.loadGeoTiff(os.path.join(args.true_segmentations, os.path.basename(pred_filepath)))
except FileNotFoundError:
except:
log.error(f'No true segmentation map present for {feature_name}. Skipping')
skipped_files.append(pred_filepath)
continue

map_name = [m for m in potential_map_names if m in pred_filepath][0]
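changeConsoleHandler is defined elsewhere in this repo. Given how it is called here (handed a RichHandler while the rich progress bar is active, then handed the old handler back after the loop), a plausible sketch is a swap that returns the previous console handler; this is an assumption about the helper, not its actual body:

import logging
from rich.logging import RichHandler

def changeConsoleHandler(logger, new_handler):
    # Hypothetical reconstruction: swap the console handler (handlers[1],
    # matching start_logger's ordering) for new_handler and return the old
    # one so the caller can restore it after the progress loop.
    old_handler = logger.handlers[1]
    logger.removeHandler(old_handler)
    logger.addHandler(new_handler)
    return old_handler

Routing console logging through a RichHandler while track() is rendering keeps log lines from breaking the progress bar, which is presumably why the swap brackets the loop.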
@@ -211,7 +206,6 @@ def main(args):
results['USGS Precision'] = results['Precision']
results['USGS Recall'] = results['Recall']

#log.info(f'Results for {feature_name} : {results}')
log.info(f'Results for "{feature_name}" : ' +
f'F1 Score : {results["F1 Score"]:.2f}, ' +
f'Precision : {results["Precision"]:.2f}, ' +
@@ -232,7 +226,6 @@ def main(args):
results['USGS Precision'] = USGS_results['Precision']
results['USGS Recall'] = USGS_results['Recall']

# log.info(f'Results for {feature_name} {results}')
log.info(f'Results for "{feature_name}" : ' +
f'F1 Score : {results["F1 Score"]:.2f}, ' +
f'Precision : {results["Precision"]:.2f}, ' +
@@ -250,8 +243,11 @@ def main(args):
changeConsoleHandler(log, logging_handler)

csv_path = os.path.join(args.output, 'validationDemo_results.csv')
log.info(f'Finished grading, saving results to {csv_path}')
results_df.to_csv(csv_path)
if len(skipped_files) > 0:
log.warning(f'Skipped grading on {len(skipped_files)} files. Could not find the necessary files for the following: {skipped_files}')
log.info(f'Finished grading, saving results to {csv_path}, Runtime was {time()-main_time} seconds')


if __name__=='__main__':
args = parse_command_line()
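Both scripts now take main_time = time() at the top of main() and log the elapsed seconds at the end. The same timing-and-logging idea can be written once as a context manager; a minimal sketch of that alternative pattern (log_runtime is hypothetical, not part of the commit):

from contextlib import contextmanager
from time import time

@contextmanager
def log_runtime(log, label):
    # Times the enclosed block and logs elapsed seconds, mirroring the
    # main_time = time() ... time() - main_time pattern added here.
    start = time()
    try:
        yield
    finally:
        log.info(f'{label} runtime was {time() - start:.2f} seconds')

# Usage sketch:
# with log_runtime(log, 'Grading'):
#     ...  # run the grading loop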
