
Add yolo weights and run case for yolo
kshitijrajsharma committed Oct 22, 2024
1 parent be8913a commit ee840fb
Showing 4 changed files with 66 additions and 30 deletions.
1 change: 1 addition & 0 deletions .github/workflows/build.yml
@@ -58,3 +58,4 @@ jobs:
pip install numpy
pip install GDAL==$(gdal-config --version) --global-option=build_ext --global-option="-I/usr/include/gdal"
python test_app.py
+python test_yolo.py
29 changes: 17 additions & 12 deletions test_yolo.py
@@ -1,13 +1,17 @@
+# Standard library imports
import os
+import time
import warnings

+# Third party imports
import tensorflow as tf

-from hot_fair_utilities import preprocess, predict, polygonize
+# Reader imports
+from hot_fair_utilities import polygonize, predict, preprocess
from hot_fair_utilities.preprocessing.yolo_format import yolo_format
from train_yolo import train as train_yolo

-warnings.simplefilter(action='ignore', category=FutureWarning)
+warnings.simplefilter(action="ignore", category=FutureWarning)


class print_time:
@@ -41,7 +45,7 @@ def __exit__(self, type, value, traceback):
rasterize=True,
rasterize_options=["binary"],
georeference_images=True,
-multimasks=True # new arg
+multimasks=True, # new arg
)

yolo_data_dir = f"{base_path}/yolo"
@@ -50,19 +54,20 @@ def __exit__(self, type, value, traceback):
preprocessed_dirs=preprocess_output,
yolo_dir=yolo_data_dir,
multimask=True,
-p_val=0.05
+p_val=0.05,
)

-train_yolo(data=f"{base_path}",
-weights=f"{os.getcwd()}/checkpoints/yolov8n-seg_ramp-training_ep500_bs16_deg30_pc2.0/weights/best.pt",
-gpu="cpu",
-epochs=2,
-batch_size=16,
-pc=2.0
-)
+output_path = train_yolo(
+data=f"{base_path}",
+weights=f"{os.getcwd()}/weights/yolov8alb/best.pt",
+gpu="cpu",
+epochs=2,
+batch_size=16,
+pc=2.0,
+)

prediction_output = f"{base_path}/prediction/output"
-model_path = f"{os.getcwd()}/checkpoints/yolov8n-seg_sample_2_ep2_bs16_pc2.0/weights/best.pt"
+model_path = f"{output_path}/weights/best.pt"
with print_time("inference"):
predict(
checkpoint_path=model_path,
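The key change in test_yolo.py is the handoff between training and inference: train() now returns its run directory, so the test no longer hard-codes a checkpoints/... path. A minimal sketch of that handoff, assuming the base_path value and the earlier preprocess()/yolo_format() steps stay as in the full script (only partially shown in this diff):

import os

from train_yolo import train as train_yolo

base_path = f"{os.getcwd()}/ramp-data"  # assumption: whatever the full test script defines

# Training starts from the newly checked-in weights and returns the run directory.
output_path = train_yolo(
    data=f"{base_path}",
    weights=f"{os.getcwd()}/weights/yolov8alb/best.pt",
    gpu="cpu",
    epochs=2,
    batch_size=16,
    pc=2.0,
)

# Inference then reads the freshly trained checkpoint from that directory;
# the predict(...) arguments themselves are unchanged (truncated in the diff above).
model_path = f"{output_path}/weights/best.pt"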
66 changes: 48 additions & 18 deletions train_yolo.py
@@ -1,11 +1,14 @@
+# Standard library imports
import argparse
-import torch
import os
-import ultralytics
from pathlib import Path

-from hot_fair_utilities.model.yolo import YOLOSegWithPosWeight
+# Third party imports
+import torch
+import ultralytics

+# Reader imports
+from hot_fair_utilities.model.yolo import YOLOSegWithPosWeight

ROOT = Path(__file__).parent.absolute()
DATA_ROOT = str(ROOT / "ramp-training")
@@ -33,36 +36,62 @@

def parse_opt():
parser = argparse.ArgumentParser()
-parser.add_argument('--gpu', type=str, default="0", help='GPU id')
-parser.add_argument('--data', type=str, default=os.path.join(DATA_ROOT),
-help='Directory containing diractory \'yolo\' with dataset.yaml.')
-parser.add_argument('--weights', type=str, default="yolov8n-seg.yaml",
-help='See https://docs.ultralytics.com/tasks/detect/#train')
-parser.add_argument('--epochs', type=int, default=100,
-help='Num of training epochs. Default is 100.')
-parser.add_argument('--batch-size', type=int, default=16,
-help='Number of images in a single batch.')
-parser.add_argument('--pc', type=float, default=1.0,
-help='Positive weight in BCE loss. pc > 1 (pc < 1) encourages higher recall (precision)')
parser.add_argument("--gpu", type=str, default="0", help="GPU id")
parser.add_argument(
"--data",
type=str,
default=os.path.join(DATA_ROOT),
help="Directory containing diractory 'yolo' with dataset.yaml.",
+)
+parser.add_argument(
+"--weights",
+type=str,
+default="yolov8n-seg.yaml",
+help="See https://docs.ultralytics.com/tasks/detect/#train",
+)
+parser.add_argument(
+"--epochs",
+type=int,
+default=100,
+help="Num of training epochs. Default is 100.",
+)
+parser.add_argument(
+"--batch-size", type=int, default=16, help="Number of images in a single batch."
+)
+parser.add_argument(
+"--pc",
+type=float,
+default=1.0,
+help="Positive weight in BCE loss. pc > 1 (pc < 1) encourages higher recall (precision)",
+)
opt = parser.parse_args()
return opt


def main():
opt = parse_opt()
os.environ["CUDA_VISIBLE_DEVICES"] = str(opt.gpu)
print(f"GPU available: {torch.cuda.is_available()}, GPU count: {torch.cuda.device_count()}")
print(
f"GPU available: {torch.cuda.is_available()}, GPU count: {torch.cuda.device_count()}"
)
train(**vars(opt))


-def train(data, weights, gpu, epochs, batch_size, pc):
-back = "n" if "yolov8n" in weights else "s" if "yolov8s" in weights else "m" if "yolov8m" in weights else "?"
+def train(data, weights, gpu, epochs, batch_size, pc, output_path=None):
+back = (
+"n"
+if "yolov8n" in weights
+else "s" if "yolov8s" in weights else "m" if "yolov8m" in weights else "?"
+)
data_scn = str(Path(data) / "yolo" / "dataset.yaml")
dataset = data_scn.split("/")[-3]
kwargs = HYPERPARAM_CHANGES

print(f"Backbone: {back}, Dataset: {dataset}, Epochs: {epochs}")
name = f"yolov8{back}-seg_{dataset}_ep{epochs}_bs{batch_size}"

+if output_path:
+name = output_path
if float(pc) != 0.0:
name += f"_pc{pc}"
kwargs = {**kwargs, "pc": pc}
@@ -80,8 +109,9 @@ def train(data, weights, gpu, epochs, batch_size, pc):
resume=resume,
deterministic=False,
device=[int(i) for i in gpu.split(",")] if "," in gpu else gpu,
-**kwargs
+**kwargs,
)
+return name


def check4checkpoint(name, weights):
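The net effect of the train() changes above is that the run name doubles as the return value, so callers can locate the weights a run produces. Below is a stripped-down, hypothetical run_name() helper that mirrors only that naming logic; HYPERPARAM_CHANGES, checkpoint resume handling, and the actual ultralytics training call are omitted, and the output_path default of None is an assumption to keep the existing callers working:

def run_name(weights, dataset, epochs, batch_size, pc, output_path=None):
    # Backbone letter is inferred from the weights path, as in train().
    back = (
        "n" if "yolov8n" in weights
        else "s" if "yolov8s" in weights
        else "m" if "yolov8m" in weights
        else "?"
    )
    name = f"yolov8{back}-seg_{dataset}_ep{epochs}_bs{batch_size}"
    if output_path:  # new in this commit: callers may pin the output location
        name = output_path
    if float(pc) != 0.0:  # the positive-class weight is encoded in the name
        name += f"_pc{pc}"
    return name  # train() now returns this value so callers can locate <returned>/weights/best.pt


# With the parse_opt() defaults (yolov8n-seg.yaml, ramp-training, 100 epochs, batch 16, pc 1.0):
# run_name("yolov8n-seg.yaml", "ramp-training", 100, 16, 1.0)
# -> "yolov8n-seg_ramp-training_ep100_bs16_pc1.0"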
Binary file added weights/yolov8alb/best.pt
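The bundled weights/yolov8alb/best.pt is the starting checkpoint that test_yolo.py now passes to train(). A quick sanity check that the file loads, assuming a standard ultralytics installation and a working directory at the repository root:

from ultralytics import YOLO

# Loading the checkpoint fails fast if the file is missing or corrupt.
model = YOLO("weights/yolov8alb/best.pt")
print(model.task)  # a yolov8*-seg checkpoint is expected to report the "segment" task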

