Commit c995b9b: fix pre-commit
LZHgrla committed Nov 29, 2023 (1 parent: d1fb769)
Showing 17 changed files with 203 additions and 219 deletions.
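The diffs below are mechanical style clean-ups of the kind flake8/yapf pre-commit hooks enforce; the repository's actual hook configuration is not shown, so the rule codes are a best guess from the changes themselves. A standalone Python sketch of the recurring before/after patterns:

# F401 -- unused imports are deleted (e.g. os, Path, Thread in get_dynamic_thres.py)

# F541 -- f-strings without placeholders lose their f prefix:
print('RepConv.fuse_repvgg_block')  # before: print(f'RepConv.fuse_repvgg_block')

# E262/E265 -- comments must start with '# ':
# basic          <- before: ##### basic ####
# print('debug') <- before: #print('debug')

# E501/yapf line wrapping, E722 bare excepts and E731 assigned lambdas are
# fixed in models/yolo.py and train_step1.py below.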
6 changes: 3 additions & 3 deletions detect.py
@@ -13,7 +13,7 @@
 from utils.datasets import LoadImages, LoadStreams
 from utils.general import (check_img_size, check_imshow, increment_path,
                            non_max_suppression, scale_coords, set_logging,
-                           strip_optimizer, xyxy2xywh)
+                           xyxy2xywh)
 from utils.plots import plot_one_box
 from utils.torch_utils import intersect_dicts, select_device, time_synchronized

@@ -98,7 +98,7 @@ def detect(save_img=False):
         if img.ndimension() == 3:
             img = img.unsqueeze(0)
 
-        # Warmup
+        # warm up
         if device.type != 'cpu' and (old_img_b != img.shape[0]
                                      or old_img_h != img.shape[2]
                                      or old_img_w != img.shape[3]):
@@ -201,7 +201,7 @@ def detect(save_img=False):
 
     if save_txt or save_img:
         s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
-        #print(f"Results saved to {save_dir}{s}")
+        # print(f"Results saved to {save_dir}{s}")
 
     print(f'Done. ({time.time() - t0:.3f}s)')

11 changes: 4 additions & 7 deletions get_dynamic_thres.py
@@ -1,8 +1,5 @@
 import argparse
 import logging
-import os
-from pathlib import Path
-from threading import Thread
 
 import numpy as np
 import torch
@@ -29,14 +26,14 @@ def get_thres(data,
     set_logging()
     device = select_device(opt.device, batch_size=batch_size)
     if isinstance(data, str):
-        is_coco = data.endswith('coco.yaml')
+        # is_coco = data.endswith('coco.yaml')
         with open(data) as f:
             data = yaml.load(f, Loader=yaml.SafeLoader)
     check_dataset(data)  # check
     nc = int(data['nc'])  # number of classes
-    iouv = torch.linspace(0.5, 0.95,
-                          10).to(device)  # iou vector for mAP@0.5:0.95
-    niou = iouv.numel()
+    # iouv = torch.linspace(0.5, 0.95,
+    #                       10).to(device)  # iou vector for mAP@0.5:0.95
+    # niou = iouv.numel()
 
     # Load model
     model = Model(cfg, ch=3, nc=nc)  # create
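For reference, the iouv lines commented out above built the ten IoU thresholds behind the mAP@0.5:0.95 metric; a standalone sketch of what they evaluate to:

import torch

iouv = torch.linspace(0.5, 0.95, 10)  # tensor([0.5000, 0.5500, ..., 0.9500])
niou = iouv.numel()
print(iouv, niou)  # ten thresholds, 0.05 apart: the "0.5:0.95" in mAP@0.5:0.95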
6 changes: 3 additions & 3 deletions hyp/hyp.finetune.dynamic.adam.yaml
@@ -2,9 +2,9 @@ lr0: 0.00001  # initial learning rate
 lrf: 1  # final OneCycleLR learning rate (lr0 * lrf)
 momentum: 0.937  # SGD momentum/Adam beta1
 weight_decay: 0.005  # optimizer weight decay 5e-4
-warmup_epochs: 0.01  # warmup epochs (fractions ok)
-warmup_momentum: 0.8  # warmup initial momentum
-warmup_bias_lr: 0.01  # warmup initial bias lr
+warmup_epochs: 0.01  # warm up epochs (fractions ok)
+warmup_momentum: 0.8  # warm up initial momentum
+warmup_bias_lr: 0.01  # warm up initial bias lr
 box: 0.05  # box loss gain
 cls: 0.3  # cls loss gain
 cls_pw: 1.0  # cls BCELoss positive_weight
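In these hyp files, lrf is a multiplier on lr0: the schedule ends at lr0 * lrf (here 1e-5 * 1, i.e. constant; 0.01 * 0.1 = 1e-3 for the scratch files below). A sketch of the cosine case, assuming one_cycle matches the usual yolov5/yolov7 definition in utils.general (an assumption, not part of this diff):

import math

def one_cycle(y1=0.0, y2=1.0, steps=100):
    # cosine ramp from y1 to y2 over `steps` epochs
    return lambda x: ((1 - math.cos(x * math.pi / steps)) / 2) * (y2 - y1) + y1

lf = one_cycle(1, 0.1, 300)     # lrf=0.1 as in hyp.scratch.p5.yaml; 300 epochs illustrative
print(lf(0), lf(150), lf(300))  # 1.0 -> 0.55 -> 0.1, multiplied by lr0 by the scheduler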
6 changes: 3 additions & 3 deletions hyp/hyp.scratch.p5.yaml
@@ -2,9 +2,9 @@ lr0: 0.01  # initial learning rate (SGD=1E-2, Adam=1E-3)
 lrf: 0.1  # final OneCycleLR learning rate (lr0 * lrf)
 momentum: 0.937  # SGD momentum/Adam beta1
 weight_decay: 0.0005  # optimizer weight decay 5e-4
-warmup_epochs: 3.0  # warmup epochs (fractions ok)
-warmup_momentum: 0.8  # warmup initial momentum
-warmup_bias_lr: 0.1  # warmup initial bias lr
+warmup_epochs: 3.0  # warm up epochs (fractions ok)
+warmup_momentum: 0.8  # warm up initial momentum
+warmup_bias_lr: 0.1  # warm up initial bias lr
 box: 0.05  # box loss gain
 cls: 0.3  # cls loss gain
 cls_pw: 1.0  # cls BCELoss positive_weight
6 changes: 3 additions & 3 deletions hyp/hyp.scratch.p6.yaml
@@ -2,9 +2,9 @@ lr0: 0.01  # initial learning rate (SGD=1E-2, Adam=1E-3)
 lrf: 0.2  # final OneCycleLR learning rate (lr0 * lrf)
 momentum: 0.937  # SGD momentum/Adam beta1
 weight_decay: 0.0005  # optimizer weight decay 5e-4
-warmup_epochs: 3.0  # warmup epochs (fractions ok)
-warmup_momentum: 0.8  # warmup initial momentum
-warmup_bias_lr: 0.1  # warmup initial bias lr
+warmup_epochs: 3.0  # warm up epochs (fractions ok)
+warmup_momentum: 0.8  # warm up initial momentum
+warmup_bias_lr: 0.1  # warm up initial bias lr
 box: 0.05  # box loss gain
 cls: 0.3  # cls loss gain
 cls_pw: 1.0  # cls BCELoss positive_weight
37 changes: 11 additions & 26 deletions models/common.py
@@ -6,7 +6,7 @@
 
 from utils.general import non_max_suppression
 
-##### basic ####
+# basic
 
 
 def autopad(k, p=None):  # kernel, padding
@@ -105,9 +105,7 @@ def forward(self, x):
         return x
 
 
-##### end of basic #####
-
-##### cspnet #####
+# cspnet
 
 
 class SPPCSPC(nn.Module):
@@ -132,9 +130,7 @@ def forward(self, x):
         return self.cv7(torch.cat((y1, y2), dim=1))
 
 
-##### end of cspnet #####
-
-##### yolor #####
+# yolor
 
 
 class ImplicitA(nn.Module):
@@ -165,9 +161,7 @@ def forward(self, x):
         return self.implicit * x
 
 
-##### end of yolor #####
-
-##### repvgg #####
+# repvgg
 
 
 class RepConv(nn.Module):
@@ -302,7 +296,7 @@ def fuse_conv_bn(self, conv, bn):
     def fuse_repvgg_block(self):
         if self.deploy:
             return
-        print(f'RepConv.fuse_repvgg_block')
+        print('RepConv.fuse_repvgg_block')
 
         self.rbr_dense = self.fuse_conv_bn(self.rbr_dense[0],
                                            self.rbr_dense[1])
@@ -346,9 +340,9 @@ def fuse_repvgg_block(self):
             weight_identity_expanded = torch.nn.Parameter(
                 torch.zeros_like(weight_1x1_expanded))
 
-            #print(f"self.rbr_1x1.weight = {self.rbr_1x1.weight.shape}, ")
-            #print(f"weight_1x1_expanded = {weight_1x1_expanded.shape}, ")
-            #print(f"self.rbr_dense.weight = {self.rbr_dense.weight.shape}, ")
+            # print(f"self.rbr_1x1.weight = {self.rbr_1x1.weight.shape}, ")
+            # print(f"weight_1x1_expanded = {weight_1x1_expanded.shape}, ")
+            # print(f"self.rbr_dense.weight = {self.rbr_dense.weight.shape}, ")
 
         self.rbr_dense.weight = torch.nn.Parameter(self.rbr_dense.weight +
                                                    weight_1x1_expanded +
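The commented prints above were inspecting the re-parameterisation step, in which the 1x1 branch is folded into the 3x3 branch by zero-padding its kernel. A minimal standalone check of that identity (shapes illustrative; biases and batch-norm folding omitted):

import torch
import torch.nn.functional as F

x = torch.randn(1, 8, 16, 16)
w3 = torch.randn(4, 8, 3, 3)   # rbr_dense-style 3x3 kernel
w1 = torch.randn(4, 8, 1, 1)   # rbr_1x1-style 1x1 kernel

two_branch = F.conv2d(x, w3, padding=1) + F.conv2d(x, w1)
w1_expanded = F.pad(w1, [1, 1, 1, 1])             # 1x1 -> 3x3, zeros around
fused = F.conv2d(x, w3 + w1_expanded, padding=1)  # single fused branch
torch.testing.assert_close(two_branch, fused)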
@@ -373,9 +367,7 @@ def fuse_repvgg_block(self):
         self.rbr_dense = None
 
 
-##### end of repvgg #####
-
-##### yolov5 #####
+# yolov5
 
 
 class NMS(nn.Module):
@@ -394,9 +386,7 @@ def forward(self, x):
                                    classes=self.classes)
 
 
-##### end of yolov5 ######
-
-##### CBNet #####
+# CBNet
 
 
 class CBLinear(nn.Module):
@@ -439,9 +429,7 @@ def forward(self, xs):
         return out
 
 
-##### end of CBNet #####
-
-##### DynamicDet #####
+# DynamicDet
 
 
 def sigmoid(logits, hard=False, threshold=0.5):
@@ -484,6 +472,3 @@ def forward(self, xs, thres=0.5):
         else:
             xs = xs.sigmoid()
         return xs
-
-
-##### end of DynamicDet #####
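The sigmoid(logits, hard=False, threshold=0.5) helper opening the DynamicDet block, together with the xs.sigmoid() fallback above, suggests a soft/hard routing gate. A hypothetical sketch of that shape; the name sigmoid_gate and the straight-through detail are assumptions, not code from this commit:

import torch

def sigmoid_gate(logits, hard=False, threshold=0.5):
    y_soft = logits.sigmoid()
    if not hard:
        return y_soft                       # soft routing score in (0, 1)
    y_hard = (y_soft > threshold).float()   # binarise at `threshold`
    return y_hard + y_soft - y_soft.detach()  # forward: 0/1, backward: sigmoid grad

print(sigmoid_gate(torch.randn(4, requires_grad=True), hard=True))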
41 changes: 23 additions & 18 deletions models/yolo.py
@@ -1,29 +1,30 @@
 import argparse
 import logging
 import math
 import sys
 from copy import deepcopy
 from pathlib import Path
 
-import yaml
-
-sys.path.append('./')  # to run '$ python *.py' files in subdirectories
-logger = logging.getLogger(__name__)
 import torch
 import torch.nn as nn
+import yaml
 from torch.nn.modules.batchnorm import _BatchNorm
 
-from models.common import *
+from models.common import (NMS, SPPCSPC, AdaptiveRouter, CBFuse, CBLinear,
+                           Concat, Conv, ConvCheckpoint, ImplicitA, ImplicitM,
+                           ReOrg, RepConv, Shortcut, autoShape)
 from utils.autoanchor import check_anchor_order
-from utils.general import check_file, make_divisible, set_logging
+from utils.general import make_divisible
 from utils.torch_utils import (copy_attr, fuse_conv_and_bn, initialize_weights,
-                               model_info, scale_img, select_device,
-                               time_synchronized)
+                               model_info, scale_img, time_synchronized)
 
 try:
     import thop  # for FLOPS computation
 except ImportError:
     thop = None
 
+sys.path.append('./')  # to run '$ python *.py' files in subdirectories
+logger = logging.getLogger(__name__)
 
 
 class IDetect(nn.Module):
     stride = None  # strides computed during build
@@ -180,8 +181,10 @@ def __init__(self,
             logger.info(
                 f"Overriding model.yaml nc={self.yaml['nc']} with nc={nc}")
             self.yaml['nc'] = nc  # override yaml value
-        self.model_b, self.save_b, self.model_b2, self.save_b2, self.model_h, self.save_h, self.model_h2, self.save_h2 = parse_model(
-            deepcopy(self.yaml), ch_b=[ch])  # model, savelist
+        (self.model_b, self.save_b, self.model_b2, self.save_b2, self.model_h,
+         self.save_h, self.model_h2,
+         self.save_h2) = parse_model(deepcopy(self.yaml),
+                                     ch_b=[ch])  # model, savelist
         self.keep_input = self.yaml.get('keep_input', False)
         self.names = [str(i) for i in range(self.yaml['nc'])]  # default names
         # print([x.shape for x in self.forward(torch.zeros(1, ch, 64, 64))])
@@ -439,7 +442,7 @@ def fuse(self):  # fuse model Conv2d() + BatchNorm2d() layers
         ]:
             for m in model.modules():
                 if isinstance(m, RepConv):
-                    #print(f" fuse_repvgg_block")
+                    # print(f" fuse_repvgg_block")
                     m.fuse_repvgg_block()
                 elif type(m) is Conv and hasattr(m, 'bn'):
                     m.conv = fuse_conv_and_bn(m.conv, m.bn)  # update conv
@@ -505,7 +508,7 @@ def parse_model(d, ch_b):  # model_dict, input_channels(3)
         for j, a in enumerate(args):
             try:
                 args[j] = eval(a) if isinstance(a, str) else a  # eval strings
-            except:
+            except Exception:
                 pass
 
         n = max(round(n * gd), 1) if n > 1 else n  # depth gain
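All four parse_model hunks make the same E722 fix. The loop evaluates yaml strings and falls back to the raw string when eval fails; a standalone sketch of why except Exception is the safer catch:

# A bare "except:" would also swallow KeyboardInterrupt/SystemExit raised
# during this loop; "except Exception:" lets them propagate.
args = ['None', '2', 'nearest']   # typical args from a model yaml
parsed = []
for a in args:
    try:
        parsed.append(eval(a))    # 'None' -> None, '2' -> 2
    except Exception:             # 'nearest' raises NameError, stays a string
        parsed.append(a)
print(parsed)                     # [None, 2, 'nearest']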
@@ -556,7 +559,7 @@ def parse_model(d, ch_b):  # model_dict, input_channels(3)
         for j, a in enumerate(args):
             try:
                 args[j] = eval(a) if isinstance(a, str) else a  # eval strings
-            except:
+            except Exception:
                 pass
 
         chs = []
@@ -633,7 +636,7 @@ def parse_model(d, ch_b):  # model_dict, input_channels(3)
         for j, a in enumerate(args):
             try:
                 args[j] = eval(a) if isinstance(a, str) else a  # eval strings
-            except:
+            except Exception:
                 pass
         chs = []
         for x in ([f] if isinstance(f, (int, str)) else f):
@@ -699,7 +702,7 @@ def parse_model(d, ch_b):  # model_dict, input_channels(3)
         for j, a in enumerate(args):
             try:
                 args[j] = eval(a) if isinstance(a, str) else a  # eval strings
-            except:
+            except Exception:
                 pass
         chs = []
         for x in ([f] if isinstance(f, (int, str)) else f):
@@ -761,5 +764,7 @@ def parse_model(d, ch_b):  # model_dict, input_channels(3)
     save_b.extend(d['b1_save'])
     save_b2.extend(d['b2_save'])
 
-    return nn.Sequential(*layers_b), sorted(save_b), nn.Sequential(*layers_b2), sorted(save_b2), \
-        nn.Sequential(*layers_h), sorted(save_h), nn.Sequential(*layers_h2), sorted(save_h2)
+    return (nn.Sequential(*layers_b),
+            sorted(save_b), nn.Sequential(*layers_b2), sorted(save_b2),
+            nn.Sequential(*layers_h), sorted(save_h),
+            nn.Sequential(*layers_h2), sorted(save_h2))
37 changes: 21 additions & 16 deletions train_step1.py
@@ -26,11 +26,10 @@
 from utils.autoanchor import check_anchors
 from utils.checkpoint import get_state_dict
 from utils.datasets import create_dataloader
-from utils.general import (check_dataset, check_file, check_git_status,
-                           check_img_size, colorstr, fitness, get_latest_run,
-                           increment_path, init_seeds, labels_to_class_weights,
-                           labels_to_image_weights, one_cycle, set_logging,
-                           strip_optimizer)
+from utils.general import (check_dataset, check_file, check_img_size, colorstr,
+                           fitness, get_latest_run, increment_path, init_seeds,
+                           labels_to_class_weights, labels_to_image_weights,
+                           one_cycle, set_logging, strip_optimizer)
 from utils.loss import ComputeLoss, ComputeLossOTA, ComputeLossOTADual
 from utils.plots import plot_images, plot_lr_scheduler, plot_results
 from utils.torch_utils import (ModelEMA, intersect_dicts, is_parallel,
@@ -85,7 +84,8 @@ def train(hyp, opt, device, tb_writer=None):
         loggers['wandb'] = wandb_logger.wandb
         data_dict = wandb_logger.data_dict
         if wandb_logger.wandb:
-            weight, epochs, hyp = opt.weight, opt.epochs, opt.hyp  # WandbLogger might update weights, epochs if resuming
+            # WandbLogger might update weights, epochs if resuming
+            weight, epochs, hyp = opt.weight, opt.epochs, opt.hyp
 
     nc = 1 if opt.single_cls else int(data_dict['nc'])  # number of classes
     names = ['item'] if opt.single_cls and len(
@@ -225,8 +225,11 @@ def train(hyp, opt, device, tb_writer=None):
     # Scheduler https://arxiv.org/pdf/1812.01187.pdf
     # https://pytorch.org/docs/stable/_modules/torch/optim/lr_scheduler.html#OneCycleLR
     if opt.linear_lr:
-        lf = lambda x: (1 - x / (epochs - 1)) * (1.0 - hyp['lrf']) + hyp[
-            'lrf']  # linear
+
+        def get_linear_lr(x):
+            return (1 - x / (epochs - 1)) * (1.0 - hyp['lrf']) + hyp['lrf']
+
+        lf = get_linear_lr
     else:
         lf = one_cycle(1, hyp['lrf'], epochs)  # cosine 1->hyp['lrf']
     scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)
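This hunk replaces an assigned lambda with a named function (flake8 E731) without changing the schedule. A quick standalone check that the linear multiplier still spans 1.0 down to lrf (epoch count and lrf illustrative):

epochs = 300
hyp = {'lrf': 0.1}

def get_linear_lr(x):
    return (1 - x / (epochs - 1)) * (1.0 - hyp['lrf']) + hyp['lrf']

assert get_linear_lr(0) == 1.0                              # epoch 0: lr = lr0
assert abs(get_linear_lr(epochs - 1) - hyp['lrf']) < 1e-12  # final epoch: lr = lr0 * lrf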
@@ -362,9 +365,10 @@ def train(hyp, opt, device, tb_writer=None):
 
     # Start training
     t0 = time.time()
-    nw = max(round(hyp['warmup_epochs'] * nb),
-             1000)  # number of warmup iterations, max(3 epochs, 1k iterations)
-    # nw = min(nw, (epochs - start_epoch) / 2 * nb)  # limit warmup to < 1/2 of training
+    nw = max(
+        round(hyp['warmup_epochs'] * nb),
+        1000)  # number of warm up iterations, max(3 epochs, 1k iterations)
+    # nw = min(nw, (epochs - start_epoch) / 2 * nb)  # limit warm up to < 1/2 of training
     maps = np.zeros(nc)  # mAP per class
     results = (0, 0, 0, 0, 0, 0, 0
                )  # P, R, mAP@.5, mAP@.5-.95, val_loss(box, obj, cls)
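The reflowed nw expression keeps its meaning: warm up for warmup_epochs worth of batches, but never fewer than 1000 iterations. A worked example with illustrative numbers:

hyp = {'warmup_epochs': 3.0}
nb = 500                                          # batches per epoch (illustrative)
nw = max(round(hyp['warmup_epochs'] * nb), 1000)  # 3 epochs * 500 batches = 1500
print(nw)                                         # 1500 > the 1000 floor, so nw = 1500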
@@ -425,7 +429,7 @@ def train(hyp, opt, device, tb_writer=None):
             imgs = imgs.to(device, non_blocking=True).float(
             ) / 255.0  # uint8 to float32, 0-255 to 0.0-1.0
 
-            # Warmup
+            # Warm up
             if ni <= nw:
                 xi = [0, nw]  # x interp
                 # model.gr = np.interp(ni, xi, [0.0, 1.0])  # iou loss ratio (obj_loss = 1.0 or iou)
@@ -505,8 +509,8 @@ def train(hyp, opt, device, tb_writer=None):
                         ]
                     })
 
-            # end batch ------------------------------------------------------------------------------------------------
-        # end epoch ----------------------------------------------------------------------------------------------------
+            # end batch ----------------------------------------------------------------------------------------------
+        # end epoch --------------------------------------------------------------------------------------------------
 
         # Scheduler
         lr = [x['lr'] for x in optimizer.param_groups]  # for tensorboard
@@ -620,7 +624,7 @@ def train(hyp, opt, device, tb_writer=None):
                                                best_model=best_fitness == fi)
                 del ckpt
 
-        # end epoch ----------------------------------------------------------------------------------------------------
+        # end epoch --------------------------------------------------------------------------------------------------
     # end training
     if rank in [-1, 0]:
         # Plots
@@ -795,7 +799,8 @@ def train(hyp, opt, device, tb_writer=None):
                 f, Loader=yaml.SafeLoader))  # replace
         opt.cfg, opt.weight, opt.resume = os.path.relpath(
             Path(ckpt).parent.parent / 'cfg.yaml'), ckpt, True
-        opt.batch_size, opt.global_rank, opt.local_rank = opt.total_batch_size, *apriori  # reinstate
+        opt.batch_size, opt.global_rank, opt.local_rank = \
+            opt.total_batch_size, *apriori  # reinstate
         opt.save_dir = os.path.relpath(Path(ckpt).parent.parent)
         logger.info('Resuming training from %s' % ckpt)
     else:
[Diffs for the remaining 9 of the 17 changed files did not load.]
