diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index 84cfd8d..b1cf9ba 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -58,3 +58,4 @@ jobs:
         pip install numpy
         pip install GDAL==$(gdal-config --version) --global-option=build_ext --global-option="-I/usr/include/gdal"
         python test_app.py
+        python test_yolo.py
diff --git a/test_yolo.py b/test_yolo.py
index 09bcb68..25dd724 100644
--- a/test_yolo.py
+++ b/test_yolo.py
@@ -1,13 +1,17 @@
+# Standard library imports
 import os
 import time
 import warnings
+
+# Third party imports
 import tensorflow as tf
 
-from hot_fair_utilities import preprocess, predict, polygonize
+# Reader imports
+from hot_fair_utilities import polygonize, predict, preprocess
 from hot_fair_utilities.preprocessing.yolo_format import yolo_format
 from train_yolo import train as train_yolo
 
-warnings.simplefilter(action='ignore', category=FutureWarning)
+warnings.simplefilter(action="ignore", category=FutureWarning)
 
 
 class print_time:
@@ -41,7 +45,7 @@ def __exit__(self, type, value, traceback):
         rasterize=True,
         rasterize_options=["binary"],
         georeference_images=True,
-        multimasks=True # new arg
+        multimasks=True,  # new arg
     )
 
 yolo_data_dir = f"{base_path}/yolo"
@@ -50,19 +54,20 @@ def __exit__(self, type, value, traceback):
         preprocessed_dirs=preprocess_output,
         yolo_dir=yolo_data_dir,
         multimask=True,
-        p_val=0.05
+        p_val=0.05,
     )
 
-train_yolo(data=f"{base_path}",
-           weights=f"{os.getcwd()}/checkpoints/yolov8n-seg_ramp-training_ep500_bs16_deg30_pc2.0/weights/best.pt",
-           gpu="cpu",
-           epochs=2,
-           batch_size=16,
-           pc=2.0
-           )
+output_path = train_yolo(
+    data=f"{base_path}",
+    weights=f"{os.getcwd()}/weights/yolov8alb/best.pt",
+    gpu="cpu",
+    epochs=2,
+    batch_size=16,
+    pc=2.0,
+)
 
 prediction_output = f"{base_path}/prediction/output"
-model_path = f"{os.getcwd()}/checkpoints/yolov8n-seg_sample_2_ep2_bs16_pc2.0/weights/best.pt"
+model_path = f"{output_path}/weights/best.pt"
 with print_time("inference"):
     predict(
         checkpoint_path=model_path,
diff --git a/train_yolo.py b/train_yolo.py
index 4725b1b..1f618d4 100644
--- a/train_yolo.py
+++ b/train_yolo.py
@@ -1,11 +1,14 @@
+# Standard library imports
 import argparse
-import torch
 import os
-import ultralytics
 from pathlib import Path
 
-from hot_fair_utilities.model.yolo import YOLOSegWithPosWeight
+# Third party imports
+import torch
+import ultralytics
 
+# Reader imports
+from hot_fair_utilities.model.yolo import YOLOSegWithPosWeight
 
 ROOT = Path(__file__).parent.absolute()
 DATA_ROOT = str(ROOT / "ramp-training")
@@ -33,17 +36,34 @@
 
 def parse_opt():
     parser = argparse.ArgumentParser()
-    parser.add_argument('--gpu', type=str, default="0", help='GPU id')
-    parser.add_argument('--data', type=str, default=os.path.join(DATA_ROOT),
-                        help='Directory containing diractory \'yolo\' with dataset.yaml.')
-    parser.add_argument('--weights', type=str, default="yolov8n-seg.yaml",
-                        help='See https://docs.ultralytics.com/tasks/detect/#train')
-    parser.add_argument('--epochs', type=int, default=100,
-                        help='Num of training epochs. Default is 100.')
-    parser.add_argument('--batch-size', type=int, default=16,
-                        help='Number of images in a single batch.')
-    parser.add_argument('--pc', type=float, default=1.0,
-                        help='Positive weight in BCE loss. pc > 1 (pc < 1) encourages higher recall (precision)')
+    parser.add_argument("--gpu", type=str, default="0", help="GPU id")
+    parser.add_argument(
+        "--data",
+        type=str,
+        default=os.path.join(DATA_ROOT),
+        help="Directory containing directory 'yolo' with dataset.yaml.",
+    )
+    parser.add_argument(
+        "--weights",
+        type=str,
+        default="yolov8n-seg.yaml",
+        help="See https://docs.ultralytics.com/tasks/detect/#train",
+    )
+    parser.add_argument(
+        "--epochs",
+        type=int,
+        default=100,
+        help="Number of training epochs. Default is 100.",
+    )
+    parser.add_argument(
+        "--batch-size", type=int, default=16, help="Number of images in a single batch."
+    )
+    parser.add_argument(
+        "--pc",
+        type=float,
+        default=1.0,
+        help="Positive weight in BCE loss. pc > 1 (pc < 1) encourages higher recall (precision)",
+    )
     opt = parser.parse_args()
     return opt
 
@@ -51,18 +71,27 @@ def parse_opt():
 def main():
     opt = parse_opt()
     os.environ["CUDA_VISIBLE_DEVICES"] = str(opt.gpu)
-    print(f"GPU available: {torch.cuda.is_available()}, GPU count: {torch.cuda.device_count()}")
+    print(
+        f"GPU available: {torch.cuda.is_available()}, GPU count: {torch.cuda.device_count()}"
+    )
     train(**vars(opt))
 
 
-def train(data, weights, gpu, epochs, batch_size, pc):
-    back = "n" if "yolov8n" in weights else "s" if "yolov8s" in weights else "m" if "yolov8m" in weights else "?"
+def train(data, weights, gpu, epochs, batch_size, pc, output_path=None):
+    back = (
+        "n"
+        if "yolov8n" in weights
+        else "s" if "yolov8s" in weights else "m" if "yolov8m" in weights else "?"
+    )
     data_scn = str(Path(data) / "yolo" / "dataset.yaml")
     dataset = data_scn.split("/")[-3]
     kwargs = HYPERPARAM_CHANGES
 
     print(f"Backbone: {back}, Dataset: {dataset}, Epochs: {epochs}")
     name = f"yolov8{back}-seg_{dataset}_ep{epochs}_bs{batch_size}"
+
+    if output_path:
+        name = output_path
     if float(pc) != 0.0:
         name += f"_pc{pc}"
         kwargs = {**kwargs, "pc": pc}
@@ -80,8 +109,9 @@ def train(data, weights, gpu, epochs, batch_size, pc):
         resume=resume,
         deterministic=False,
         device=[int(i) for i in gpu.split(",")] if "," in gpu else gpu,
-        **kwargs
+        **kwargs,
     )
+    return name
 
 
 def check4checkpoint(name, weights):
diff --git a/weights/yolov8alb/best.pt b/weights/yolov8alb/best.pt
new file mode 100644
index 0000000..985748b
Binary files /dev/null and b/weights/yolov8alb/best.pt differ