diff --git a/legacy/lasr_object_detection_yolo/CATKIN_IGNORE b/legacy/lasr_object_detection_yolo/CATKIN_IGNORE
deleted file mode 100644
index e69de29bb..000000000
diff --git a/legacy/lasr_object_detection_yolo/CMakeLists.txt b/legacy/lasr_object_detection_yolo/CMakeLists.txt
deleted file mode 100644
index 6abbae500..000000000
--- a/legacy/lasr_object_detection_yolo/CMakeLists.txt
+++ /dev/null
@@ -1,208 +0,0 @@
-cmake_minimum_required(VERSION 3.0.2)
-project(lasr_object_detection_yolo)
-
-## Compile as C++11, supported in ROS Kinetic and newer
-# add_compile_options(-std=c++11)
-
-## Find catkin macros and libraries
-## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz)
-## is used, also find other catkin packages
-find_package(catkin REQUIRED COMPONENTS
- rospy
- std_msgs
- sensor_msgs
- message_runtime
- message_generation
- lasr_vision_msgs
-)
-
-## System dependencies are found with CMake's conventions
-# find_package(Boost REQUIRED COMPONENTS system)
-
-## Uncomment this if the package has a setup.py. This macro ensures
-## modules and global scripts declared therein get installed
-## See http://ros.org/doc/api/catkin/html/user_guide/setup_dot_py.html
-catkin_python_setup()
-
-################################################
-## Declare ROS messages, services and actions ##
-################################################
-
-## To declare and build messages, services or actions from within this
-## package, follow these steps:
-## * Let MSG_DEP_SET be the set of packages whose message types you use in
-## your messages/services/actions (e.g. std_msgs, actionlib_msgs, ...).
-## * In the file package.xml:
-## * add a build_depend tag for "message_generation"
-## * add a build_depend and a exec_depend tag for each package in MSG_DEP_SET
-## * If MSG_DEP_SET isn't empty the following dependency has been pulled in
-## but can be declared for certainty nonetheless:
-## * add a exec_depend tag for "message_runtime"
-## * In this file (CMakeLists.txt):
-## * add "message_generation" and every package in MSG_DEP_SET to
-## find_package(catkin REQUIRED COMPONENTS ...)
-## * add "message_runtime" and every package in MSG_DEP_SET to
-## catkin_package(CATKIN_DEPENDS ...)
-## * uncomment the add_*_files sections below as needed
-## and list every .msg/.srv/.action file to be processed
-## * uncomment the generate_messages entry below
-## * add every package in MSG_DEP_SET to generate_messages(DEPENDENCIES ...)
-
-## Generate messages in the 'msg' folder
-# add_message_files(
-# FILES
-# Detection.msg
-# )
-
-## Generate services in the 'srv' folder
-add_service_files(
- FILES
- YoloDetection.srv
-)
-
-## Generate actions in the 'action' folder
-# add_action_files(
-# FILES
-# Action1.action
-# Action2.action
-# )
-
-## Generate added messages and services with any dependencies listed here
-generate_messages(
- DEPENDENCIES
- std_msgs
- sensor_msgs
- lasr_vision_msgs
-)
-
-################################################
-## Declare ROS dynamic reconfigure parameters ##
-################################################
-
-## To declare and build dynamic reconfigure parameters within this
-## package, follow these steps:
-## * In the file package.xml:
-## * add a build_depend and a exec_depend tag for "dynamic_reconfigure"
-## * In this file (CMakeLists.txt):
-## * add "dynamic_reconfigure" to
-## find_package(catkin REQUIRED COMPONENTS ...)
-## * uncomment the "generate_dynamic_reconfigure_options" section below
-## and list every .cfg file to be processed
-
-## Generate dynamic reconfigure parameters in the 'cfg' folder
-# generate_dynamic_reconfigure_options(
-# cfg/DynReconf1.cfg
-# cfg/DynReconf2.cfg
-# )
-
-###################################
-## catkin specific configuration ##
-###################################
-## The catkin_package macro generates cmake config files for your package
-## Declare things to be passed to dependent projects
-## INCLUDE_DIRS: uncomment this if your package contains header files
-## LIBRARIES: libraries you create in this project that dependent projects also need
-## CATKIN_DEPENDS: catkin_packages dependent projects also need
-## DEPENDS: system dependencies of this project that dependent projects also need
-catkin_package(
-# INCLUDE_DIRS include
-# LIBRARIES lasr_object_detection_yolo
-# CATKIN_DEPENDS rospy std_msgs
-# DEPENDS system_lib
-)
-
-###########
-## Build ##
-###########
-
-## Specify additional locations of header files
-## Your package locations should be listed before other locations
-include_directories(
-# include
- ${catkin_INCLUDE_DIRS}
-)
-
-## Declare a C++ library
-# add_library(${PROJECT_NAME}
-# src/${PROJECT_NAME}/lasr_object_detection_yolo.cpp
-# )
-
-## Add cmake target dependencies of the library
-## as an example, code may need to be generated before libraries
-## either from message generation or dynamic reconfigure
-# add_dependencies(${PROJECT_NAME} ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS})
-
-## Declare a C++ executable
-## With catkin_make all packages are built within a single CMake context
-## The recommended prefix ensures that target names across packages don't collide
-# add_executable(${PROJECT_NAME}_node src/lasr_object_detection_yolo_node.cpp)
-
-## Rename C++ executable without prefix
-## The above recommended prefix causes long target names, the following renames the
-## target back to the shorter version for ease of user use
-## e.g. "rosrun someones_pkg node" instead of "rosrun someones_pkg someones_pkg_node"
-# set_target_properties(${PROJECT_NAME}_node PROPERTIES OUTPUT_NAME node PREFIX "")
-
-## Add cmake target dependencies of the executable
-## same as for the library above
-# add_dependencies(${PROJECT_NAME}_node ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS})
-
-## Specify libraries to link a library or executable target against
-# target_link_libraries(${PROJECT_NAME}_node
-# ${catkin_LIBRARIES}
-# )
-
-#############
-## Install ##
-#############
-
-# all install targets should use catkin DESTINATION variables
-# See http://ros.org/doc/api/catkin/html/adv_user_guide/variables.html
-
-## Mark executable scripts (Python etc.) for installation
-## in contrast to setup.py, you can choose the destination
-# catkin_install_python(PROGRAMS
-# scripts/my_python_script
-# DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
-# )
-
-## Mark executables for installation
-## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_executables.html
-# install(TARGETS ${PROJECT_NAME}_node
-# RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
-# )
-
-## Mark libraries for installation
-## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_libraries.html
-# install(TARGETS ${PROJECT_NAME}
-# ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
-# LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
-# RUNTIME DESTINATION ${CATKIN_GLOBAL_BIN_DESTINATION}
-# )
-
-## Mark cpp header files for installation
-# install(DIRECTORY include/${PROJECT_NAME}/
-# DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION}
-# FILES_MATCHING PATTERN "*.h"
-# PATTERN ".svn" EXCLUDE
-# )
-
-## Mark other files for installation (e.g. launch and bag files, etc.)
-# install(FILES
-# # myfile1
-# # myfile2
-# DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION}
-# )
-
-#############
-## Testing ##
-#############
-
-## Add gtest based cpp test target and link libraries
-# catkin_add_gtest(${PROJECT_NAME}-test test/test_lasr_object_detection_yolo.cpp)
-# if(TARGET ${PROJECT_NAME}-test)
-# target_link_libraries(${PROJECT_NAME}-test ${PROJECT_NAME})
-# endif()
-
-## Add folders to be run by python nosetests
-# catkin_add_nosetests(test)
diff --git a/legacy/lasr_object_detection_yolo/README.md b/legacy/lasr_object_detection_yolo/README.md
deleted file mode 100644
index 8420085ec..000000000
--- a/legacy/lasr_object_detection_yolo/README.md
+++ /dev/null
@@ -1,64 +0,0 @@
-# lasr_object_detection_yolo
-
-The lasr_object_detection_yolo package
-
-This package is maintained by:
-- [jared](mailto:jared@todo.todo)
-
-## Prerequisites
-
-This package depends on the following ROS packages:
-- catkin (buildtool)
-- rospy (build)
-- std_msgs (build)
-- rospy (exec)
-- std_msgs (exec)
-- lasr_vision_msgs
-
-Ask the package maintainer to write or create a blank `doc/PREREQUISITES.md` for their package!
-
-## Usage
-
-Ask the package maintainer to write a `doc/USAGE.md` for their package!
-
-## Example
-
-Ask the package maintainer to write a `doc/EXAMPLE.md` for their package!
-
-## Technical Overview
-
-Ask the package maintainer to write a `doc/TECHNICAL.md` for their package!
-
-## ROS Definitions
-
-### Launch Files
-
-This package has no launch files.
-
-### Messages
-
-This package has no messages.
-
-### Services
-
-#### `YoloDetection`
-
-Request
-
-| Field | Type | Description |
-|:-:|:-:|---|
-| image_raw | sensor_msgs/Image | |
-| dataset | string | |
-| confidence | float32 | |
-| nms | float32 | |
-
-Response
-
-| Field | Type | Description |
-|:-:|:-:|---|
-| detected_objects | lasr_vision_msgs/Detection[] | |
-
-
-### Actions
-
-This package has no actions.
diff --git a/legacy/lasr_object_detection_yolo/__init__.py b/legacy/lasr_object_detection_yolo/__init__.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/legacy/lasr_object_detection_yolo/package.xml b/legacy/lasr_object_detection_yolo/package.xml
deleted file mode 100644
index 200170112..000000000
--- a/legacy/lasr_object_detection_yolo/package.xml
+++ /dev/null
@@ -1,66 +0,0 @@
-<?xml version="1.0"?>
-<package format="2">
-  <name>lasr_object_detection_yolo</name>
-  <version>0.0.0</version>
-  <description>The lasr_object_detection_yolo package</description>
-
-
-
-
-  <maintainer email="jared@todo.todo">Jared Swift</maintainer>
-
-
-
-
-
-  <license>TODO</license>
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-  <buildtool_depend>catkin</buildtool_depend>
-  <build_depend>rospy</build_depend>
-  <build_depend>std_msgs</build_depend>
-  <build_export_depend>rospy</build_export_depend>
-  <build_export_depend>std_msgs</build_export_depend>
-  <exec_depend>rospy</exec_depend>
-  <exec_depend>std_msgs</exec_depend>
-  <depend>lasr_vision_msgs</depend>
-
-
-
-
-
-  <export>
-  </export>
-</package>
\ No newline at end of file
diff --git a/legacy/lasr_object_detection_yolo/setup.py b/legacy/lasr_object_detection_yolo/setup.py
deleted file mode 100644
index 151d41685..000000000
--- a/legacy/lasr_object_detection_yolo/setup.py
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/usr/bin/env python3
-
-
-from distutils.core import setup
-from catkin_pkg.python_setup import generate_distutils_setup
-
-setup_args = generate_distutils_setup(
- packages=['lasr_object_detection_yolo'],
- package_dir={'': 'src'}
-)
-
-setup(**setup_args)
\ No newline at end of file
diff --git a/legacy/lasr_object_detection_yolo/src/__init__.py b/legacy/lasr_object_detection_yolo/src/__init__.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/legacy/lasr_object_detection_yolo/src/lasr_object_detection_yolo/__init__.py b/legacy/lasr_object_detection_yolo/src/lasr_object_detection_yolo/__init__.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/legacy/lasr_object_detection_yolo/src/lasr_object_detection_yolo/darknet_pytorch/__init__.py b/legacy/lasr_object_detection_yolo/src/lasr_object_detection_yolo/darknet_pytorch/__init__.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/legacy/lasr_object_detection_yolo/src/lasr_object_detection_yolo/darknet_pytorch/config.py b/legacy/lasr_object_detection_yolo/src/lasr_object_detection_yolo/darknet_pytorch/config.py
deleted file mode 100644
index 197160634..000000000
--- a/legacy/lasr_object_detection_yolo/src/lasr_object_detection_yolo/darknet_pytorch/config.py
+++ /dev/null
@@ -1,100 +0,0 @@
-import torch
-from darknet_pytorch.torch_utils import convert2cpu
-
-
-def parse_cfg(cfgfile):
- blocks = []
- fp = open(cfgfile, 'r')
- block = None
- line = fp.readline()
- while line != '':
- line = line.rstrip()
- if line == '' or line[0] == '#':
- line = fp.readline()
- continue
- elif line[0] == '[':
- if block:
- blocks.append(block)
- block = dict()
- block['type'] = line.lstrip('[').rstrip(']')
- # set default value
- if block['type'] == 'convolutional':
- block['batch_normalize'] = 0
- else:
- key, value = line.split('=')
- key = key.strip()
- if key == 'type':
- key = '_type'
- value = value.strip()
- block[key] = value
- line = fp.readline()
-
- if block:
- blocks.append(block)
- fp.close()
- return blocks
-
-
-def load_conv(buf, start, conv_model):
- num_w = conv_model.weight.numel()
- num_b = conv_model.bias.numel()
- conv_model.bias.data.copy_(torch.from_numpy(buf[start:start + num_b]));
- start = start + num_b
- conv_model.weight.data.copy_(torch.from_numpy(buf[start:start + num_w]).reshape(conv_model.weight.data.shape));
- start = start + num_w
- return start
-
-
-def save_conv(fp, conv_model):
- if conv_model.bias.is_cuda:
- convert2cpu(conv_model.bias.data).numpy().tofile(fp)
- convert2cpu(conv_model.weight.data).numpy().tofile(fp)
- else:
- conv_model.bias.data.numpy().tofile(fp)
- conv_model.weight.data.numpy().tofile(fp)
-
-
-def load_conv_bn(buf, start, conv_model, bn_model):
- num_w = conv_model.weight.numel()
- num_b = bn_model.bias.numel()
- bn_model.bias.data.copy_(torch.from_numpy(buf[start:start + num_b]));
- start = start + num_b
- bn_model.weight.data.copy_(torch.from_numpy(buf[start:start + num_b]));
- start = start + num_b
- bn_model.running_mean.copy_(torch.from_numpy(buf[start:start + num_b]));
- start = start + num_b
- bn_model.running_var.copy_(torch.from_numpy(buf[start:start + num_b]));
- start = start + num_b
- conv_model.weight.data.copy_(torch.from_numpy(buf[start:start + num_w]).reshape(conv_model.weight.data.shape));
- start = start + num_w
- return start
-
-
-def save_conv_bn(fp, conv_model, bn_model):
- if bn_model.bias.is_cuda:
- convert2cpu(bn_model.bias.data).numpy().tofile(fp)
- convert2cpu(bn_model.weight.data).numpy().tofile(fp)
- convert2cpu(bn_model.running_mean).numpy().tofile(fp)
- convert2cpu(bn_model.running_var).numpy().tofile(fp)
- convert2cpu(conv_model.weight.data).numpy().tofile(fp)
- else:
- bn_model.bias.data.numpy().tofile(fp)
- bn_model.weight.data.numpy().tofile(fp)
- bn_model.running_mean.numpy().tofile(fp)
- bn_model.running_var.numpy().tofile(fp)
- conv_model.weight.data.numpy().tofile(fp)
-
-
-def load_fc(buf, start, fc_model):
- num_w = fc_model.weight.numel()
- num_b = fc_model.bias.numel()
- fc_model.bias.data.copy_(torch.from_numpy(buf[start:start + num_b]));
- start = start + num_b
- fc_model.weight.data.copy_(torch.from_numpy(buf[start:start + num_w]));
- start = start + num_w
- return start
-
-
-def save_fc(fp, fc_model):
- fc_model.bias.data.numpy().tofile(fp)
- fc_model.weight.data.numpy().tofile(fp)
diff --git a/legacy/lasr_object_detection_yolo/src/lasr_object_detection_yolo/darknet_pytorch/darknet.py b/legacy/lasr_object_detection_yolo/src/lasr_object_detection_yolo/darknet_pytorch/darknet.py
deleted file mode 100644
index d3cab61f6..000000000
--- a/legacy/lasr_object_detection_yolo/src/lasr_object_detection_yolo/darknet_pytorch/darknet.py
+++ /dev/null
@@ -1,480 +0,0 @@
-import torch.nn as nn
-import torch.nn.functional as F
-import numpy as np
-from darknet_pytorch.region_loss import RegionLoss
-from darknet_pytorch.yolo_layer import YoloLayer
-from darknet_pytorch.config import *
-from darknet_pytorch.torch_utils import *
-
-
-class Mish(torch.nn.Module):
- def __init__(self):
- super().__init__()
-
- def forward(self, x):
- x = x * (torch.tanh(torch.nn.functional.softplus(x)))
- return x
-
-
-class MaxPoolDark(nn.Module):
- def __init__(self, size=2, stride=1):
- super(MaxPoolDark, self).__init__()
- self.size = size
- self.stride = stride
-
- def forward(self, x):
- '''
- darknet output_size = (input_size + p - k) / s +1
- p : padding = k - 1
- k : size
- s : stride
- torch output_size = (input_size + 2*p -k) / s +1
- p : padding = k//2
- '''
- p = self.size // 2
- if ((x.shape[2] - 1) // self.stride) != ((x.shape[2] + 2 * p - self.size) // self.stride):
- padding1 = (self.size - 1) // 2
- padding2 = padding1 + 1
- else:
- padding1 = (self.size - 1) // 2
- padding2 = padding1
- if ((x.shape[3] - 1) // self.stride) != ((x.shape[3] + 2 * p - self.size) // self.stride):
- padding3 = (self.size - 1) // 2
- padding4 = padding3 + 1
- else:
- padding3 = (self.size - 1) // 2
- padding4 = padding3
- x = F.max_pool2d(F.pad(x, (padding3, padding4, padding1, padding2), mode='replicate'),
- self.size, stride=self.stride)
- return x
-
-
-class Upsample_expand(nn.Module):
- def __init__(self, stride=2):
- super(Upsample_expand, self).__init__()
- self.stride = stride
-
- def forward(self, x):
- assert (x.data.dim() == 4)
-
- x = x.view(x.size(0), x.size(1), x.size(2), 1, x.size(3), 1).\
- expand(x.size(0), x.size(1), x.size(2), self.stride, x.size(3), self.stride).contiguous().\
- view(x.size(0), x.size(1), x.size(2) * self.stride, x.size(3) * self.stride)
-
- return x
-
-
-class Upsample_interpolate(nn.Module):
- def __init__(self, stride):
- super(Upsample_interpolate, self).__init__()
- self.stride = stride
-
- def forward(self, x):
- assert (x.data.dim() == 4)
-
- out = F.interpolate(x, size=(x.size(2) * self.stride, x.size(3) * self.stride), mode='nearest')
- return out
-
-
-class Reorg(nn.Module):
- def __init__(self, stride=2):
- super(Reorg, self).__init__()
- self.stride = stride
-
- def forward(self, x):
- stride = self.stride
- assert (x.data.dim() == 4)
- B = x.data.size(0)
- C = x.data.size(1)
- H = x.data.size(2)
- W = x.data.size(3)
- assert (H % stride == 0)
- assert (W % stride == 0)
- ws = stride
- hs = stride
-        x = x.view(B, C, H // hs, hs, W // ws, ws).transpose(3, 4).contiguous()
-        x = x.view(B, C, H // hs * W // ws, hs * ws).transpose(2, 3).contiguous()
-        x = x.view(B, C, hs * ws, H // hs, W // ws).transpose(1, 2).contiguous()
-        x = x.view(B, hs * ws * C, H // hs, W // ws)
- return x
-
-
-class GlobalAvgPool2d(nn.Module):
- def __init__(self):
- super(GlobalAvgPool2d, self).__init__()
-
- def forward(self, x):
- N = x.data.size(0)
- C = x.data.size(1)
- H = x.data.size(2)
- W = x.data.size(3)
- x = F.avg_pool2d(x, (H, W))
- x = x.view(N, C)
- return x
-
-
-# for route, shortcut and sam
-class EmptyModule(nn.Module):
- def __init__(self):
- super(EmptyModule, self).__init__()
-
- def forward(self, x):
- return x
-
-
-# support route shortcut and reorg
-class Darknet(nn.Module):
- def __init__(self, cfgfile, inference=False):
- super(Darknet, self).__init__()
- self.inference = inference
- self.training = not self.inference
-
- self.blocks = parse_cfg(cfgfile)
- self.width = int(self.blocks[0]['width'])
- self.height = int(self.blocks[0]['height'])
-
- self.models = self.create_network(self.blocks) # merge conv, bn,leaky
- self.loss = self.models[len(self.models) - 1]
-
- if self.blocks[(len(self.blocks) - 1)]['type'] == 'region':
- self.anchors = self.loss.anchors
- self.num_anchors = self.loss.num_anchors
- self.anchor_step = self.loss.anchor_step
- self.num_classes = self.loss.num_classes
-
- self.header = torch.IntTensor([0, 0, 0, 0])
- self.seen = 0
-
- def forward(self, x):
- ind = -2
- self.loss = None
- outputs = dict()
- out_boxes = []
- for block in self.blocks:
- ind = ind + 1
- # if ind > 0:
- # return x
-
- if block['type'] == 'net':
- continue
- elif block['type'] in ['convolutional', 'maxpool', 'reorg', 'upsample', 'avgpool', 'softmax', 'connected']:
- x = self.models[ind](x)
- outputs[ind] = x
- elif block['type'] == 'route':
- layers = block['layers'].split(',')
- layers = [int(i) if int(i) > 0 else int(i) + ind for i in layers]
- if len(layers) == 1:
- if 'groups' not in block.keys() or int(block['groups']) == 1:
- x = outputs[layers[0]]
- outputs[ind] = x
- else:
- groups = int(block['groups'])
- group_id = int(block['group_id'])
- _, b, _, _ = outputs[layers[0]].shape
- x = outputs[layers[0]][:, b // groups * group_id:b // groups * (group_id + 1)]
- outputs[ind] = x
- elif len(layers) == 2:
- x1 = outputs[layers[0]]
- x2 = outputs[layers[1]]
- x = torch.cat((x1, x2), 1)
- outputs[ind] = x
- elif len(layers) == 4:
- x1 = outputs[layers[0]]
- x2 = outputs[layers[1]]
- x3 = outputs[layers[2]]
- x4 = outputs[layers[3]]
- x = torch.cat((x1, x2, x3, x4), 1)
- outputs[ind] = x
- else:
-                    print("route number > 2, is {}".format(len(layers)))
-
- elif block['type'] == 'shortcut':
- from_layer = int(block['from'])
- activation = block['activation']
- from_layer = from_layer if from_layer > 0 else from_layer + ind
- x1 = outputs[from_layer]
- x2 = outputs[ind - 1]
- x = x1 + x2
- if activation == 'leaky':
- x = F.leaky_relu(x, 0.1, inplace=True)
- elif activation == 'relu':
- x = F.relu(x, inplace=True)
- outputs[ind] = x
- elif block['type'] == 'sam':
- from_layer = int(block['from'])
- from_layer = from_layer if from_layer > 0 else from_layer + ind
- x1 = outputs[from_layer]
- x2 = outputs[ind - 1]
- x = x1 * x2
- outputs[ind] = x
- elif block['type'] == 'region':
- continue
- if self.loss:
- self.loss = self.loss + self.models[ind](x)
- else:
- self.loss = self.models[ind](x)
- outputs[ind] = None
- elif block['type'] == 'yolo':
- # if self.training:
- # pass
- # else:
- # boxes = self.models[ind](x)
- # out_boxes.append(boxes)
- boxes = self.models[ind](x)
- out_boxes.append(boxes)
- elif block['type'] == 'cost':
- continue
- else:
- print('unknown type %s' % (block['type']))
-
- if self.training:
- return out_boxes
- else:
- return get_region_boxes(out_boxes)
-
- def print_network(self):
- print_cfg(self.blocks)
-
- def create_network(self, blocks):
- models = nn.ModuleList()
-
- prev_filters = 3
- out_filters = []
- prev_stride = 1
- out_strides = []
- conv_id = 0
- for block in blocks:
- if block['type'] == 'net':
- prev_filters = int(block['channels'])
- continue
- elif block['type'] == 'convolutional':
- conv_id = conv_id + 1
- batch_normalize = int(block['batch_normalize'])
- filters = int(block['filters'])
- kernel_size = int(block['size'])
- stride = int(block['stride'])
- is_pad = int(block['pad'])
- pad = (kernel_size - 1) // 2 if is_pad else 0
- activation = block['activation']
- model = nn.Sequential()
- if batch_normalize:
- model.add_module('conv{0}'.format(conv_id),
- nn.Conv2d(prev_filters, filters, kernel_size, stride, pad, bias=False))
- model.add_module('bn{0}'.format(conv_id), nn.BatchNorm2d(filters))
- # model.add_module('bn{0}'.format(conv_id), BN2d(filters))
- else:
- model.add_module('conv{0}'.format(conv_id),
- nn.Conv2d(prev_filters, filters, kernel_size, stride, pad))
- if activation == 'leaky':
- model.add_module('leaky{0}'.format(conv_id), nn.LeakyReLU(0.1, inplace=True))
- elif activation == 'relu':
- model.add_module('relu{0}'.format(conv_id), nn.ReLU(inplace=True))
- elif activation == 'mish':
- model.add_module('mish{0}'.format(conv_id), Mish())
- elif activation == 'linear':
- model.add_module('linear{0}'.format(conv_id), nn.Identity())
- elif activation == 'logistic':
- model.add_module('sigmoid{0}'.format(conv_id), nn.Sigmoid())
- else:
- print("No convolutional activation named {}".format(activation))
-
- prev_filters = filters
- out_filters.append(prev_filters)
- prev_stride = stride * prev_stride
- out_strides.append(prev_stride)
- models.append(model)
- elif block['type'] == 'maxpool':
- pool_size = int(block['size'])
- stride = int(block['stride'])
- if stride == 1 and pool_size % 2:
-                # You can use MaxPoolDark instead; this form is convenient for ONNX conversion.
- # Example: [maxpool] size=3 stride=1
- model = nn.MaxPool2d(kernel_size=pool_size, stride=stride, padding=pool_size // 2)
- elif stride == pool_size:
-                # You can use MaxPoolDark instead; this form is convenient for ONNX conversion.
- # Example: [maxpool] size=2 stride=2
- model = nn.MaxPool2d(kernel_size=pool_size, stride=stride, padding=0)
- else:
- model = MaxPoolDark(pool_size, stride)
- out_filters.append(prev_filters)
- prev_stride = stride * prev_stride
- out_strides.append(prev_stride)
- models.append(model)
- elif block['type'] == 'avgpool':
- model = GlobalAvgPool2d()
- out_filters.append(prev_filters)
- models.append(model)
- elif block['type'] == 'softmax':
- model = nn.Softmax()
- out_strides.append(prev_stride)
- out_filters.append(prev_filters)
- models.append(model)
- elif block['type'] == 'cost':
- if block['_type'] == 'sse':
- model = nn.MSELoss(reduction='mean')
- elif block['_type'] == 'L1':
- model = nn.L1Loss(reduction='mean')
- elif block['_type'] == 'smooth':
- model = nn.SmoothL1Loss(reduction='mean')
- out_filters.append(1)
- out_strides.append(prev_stride)
- models.append(model)
- elif block['type'] == 'reorg':
- stride = int(block['stride'])
- prev_filters = stride * stride * prev_filters
- out_filters.append(prev_filters)
- prev_stride = prev_stride * stride
- out_strides.append(prev_stride)
- models.append(Reorg(stride))
- elif block['type'] == 'upsample':
- stride = int(block['stride'])
- out_filters.append(prev_filters)
- prev_stride = prev_stride // stride
- out_strides.append(prev_stride)
-
- models.append(Upsample_expand(stride))
- # models.append(Upsample_interpolate(stride))
-
- elif block['type'] == 'route':
- layers = block['layers'].split(',')
- ind = len(models)
- layers = [int(i) if int(i) > 0 else int(i) + ind for i in layers]
- if len(layers) == 1:
- if 'groups' not in block.keys() or int(block['groups']) == 1:
- prev_filters = out_filters[layers[0]]
- prev_stride = out_strides[layers[0]]
- else:
- prev_filters = out_filters[layers[0]] // int(block['groups'])
- prev_stride = out_strides[layers[0]] // int(block['groups'])
- elif len(layers) == 2:
- assert (layers[0] == ind - 1 or layers[1] == ind - 1)
- prev_filters = out_filters[layers[0]] + out_filters[layers[1]]
- prev_stride = out_strides[layers[0]]
- elif len(layers) == 4:
- assert (layers[0] == ind - 1)
- prev_filters = out_filters[layers[0]] + out_filters[layers[1]] + out_filters[layers[2]] + \
- out_filters[layers[3]]
- prev_stride = out_strides[layers[0]]
- else:
- print("route error!!!")
-
- out_filters.append(prev_filters)
- out_strides.append(prev_stride)
- models.append(EmptyModule())
- elif block['type'] == 'shortcut':
- ind = len(models)
- prev_filters = out_filters[ind - 1]
- out_filters.append(prev_filters)
- prev_stride = out_strides[ind - 1]
- out_strides.append(prev_stride)
- models.append(EmptyModule())
- elif block['type'] == 'sam':
- ind = len(models)
- prev_filters = out_filters[ind - 1]
- out_filters.append(prev_filters)
- prev_stride = out_strides[ind - 1]
- out_strides.append(prev_stride)
- models.append(EmptyModule())
- elif block['type'] == 'connected':
- filters = int(block['output'])
- if block['activation'] == 'linear':
- model = nn.Linear(prev_filters, filters)
- elif block['activation'] == 'leaky':
- model = nn.Sequential(
- nn.Linear(prev_filters, filters),
- nn.LeakyReLU(0.1, inplace=True))
- elif block['activation'] == 'relu':
- model = nn.Sequential(
- nn.Linear(prev_filters, filters),
- nn.ReLU(inplace=True))
- prev_filters = filters
- out_filters.append(prev_filters)
- out_strides.append(prev_stride)
- models.append(model)
- elif block['type'] == 'region':
- loss = RegionLoss()
- anchors = block['anchors'].split(',')
- loss.anchors = [float(i) for i in anchors]
- loss.num_classes = int(block['classes'])
- loss.num_anchors = int(block['num'])
- loss.anchor_step = len(loss.anchors) // loss.num_anchors
- loss.object_scale = float(block['object_scale'])
- loss.noobject_scale = float(block['noobject_scale'])
- loss.class_scale = float(block['class_scale'])
- loss.coord_scale = float(block['coord_scale'])
- out_filters.append(prev_filters)
- out_strides.append(prev_stride)
- models.append(loss)
- elif block['type'] == 'yolo':
- yolo_layer = YoloLayer()
- anchors = block['anchors'].split(',')
- anchor_mask = block['mask'].split(',')
- yolo_layer.anchor_mask = [int(i) for i in anchor_mask]
- yolo_layer.anchors = [float(i) for i in anchors]
- yolo_layer.num_classes = int(block['classes'])
- self.num_classes = yolo_layer.num_classes
- yolo_layer.num_anchors = int(block['num'])
- yolo_layer.anchor_step = len(yolo_layer.anchors) // yolo_layer.num_anchors
- yolo_layer.stride = prev_stride
- yolo_layer.scale_x_y = float(block['scale_x_y'])
- out_filters.append(prev_filters)
- out_strides.append(prev_stride)
- models.append(yolo_layer)
- else:
- print('unknown type %s' % (block['type']))
-
- return models
-
- def load_weights(self, weightfile):
- fp = open(weightfile, 'rb')
- header = np.fromfile(fp, count=5, dtype=np.int32)
- self.header = torch.from_numpy(header)
- self.seen = self.header[3]
- buf = np.fromfile(fp, dtype=np.float32)
- fp.close()
-
- start = 0
- ind = -2
- for block in self.blocks:
- if start >= buf.size:
- break
- ind = ind + 1
- if block['type'] == 'net':
- continue
- elif block['type'] == 'convolutional':
- model = self.models[ind]
- batch_normalize = int(block['batch_normalize'])
- if batch_normalize:
- start = load_conv_bn(buf, start, model[0], model[1])
- else:
- start = load_conv(buf, start, model[0])
- elif block['type'] == 'connected':
- model = self.models[ind]
- if block['activation'] != 'linear':
- start = load_fc(buf, start, model[0])
- else:
- start = load_fc(buf, start, model)
- elif block['type'] == 'maxpool':
- pass
- elif block['type'] == 'reorg':
- pass
- elif block['type'] == 'upsample':
- pass
- elif block['type'] == 'route':
- pass
- elif block['type'] == 'shortcut':
- pass
- elif block['type'] == 'sam':
- pass
- elif block['type'] == 'region':
- pass
- elif block['type'] == 'yolo':
- pass
- elif block['type'] == 'avgpool':
- pass
- elif block['type'] == 'softmax':
- pass
- elif block['type'] == 'cost':
- pass
- else:
- print('unknown type %s' % (block['type']))
\ No newline at end of file
diff --git a/legacy/lasr_object_detection_yolo/src/lasr_object_detection_yolo/darknet_pytorch/region_loss.py b/legacy/lasr_object_detection_yolo/src/lasr_object_detection_yolo/darknet_pytorch/region_loss.py
deleted file mode 100644
index 75d413376..000000000
--- a/legacy/lasr_object_detection_yolo/src/lasr_object_detection_yolo/darknet_pytorch/region_loss.py
+++ /dev/null
@@ -1,195 +0,0 @@
-import torch.nn as nn
-import torch.nn.functional as F
-from darknet_pytorch.torch_utils import *
-
-
-def build_targets(pred_boxes, target, anchors, num_anchors, num_classes, nH, nW, noobject_scale, object_scale,
- sil_thresh, seen):
- nB = target.size(0)
- nA = num_anchors
- nC = num_classes
-    anchor_step = len(anchors) // num_anchors
- conf_mask = torch.ones(nB, nA, nH, nW) * noobject_scale
- coord_mask = torch.zeros(nB, nA, nH, nW)
- cls_mask = torch.zeros(nB, nA, nH, nW)
- tx = torch.zeros(nB, nA, nH, nW)
- ty = torch.zeros(nB, nA, nH, nW)
- tw = torch.zeros(nB, nA, nH, nW)
- th = torch.zeros(nB, nA, nH, nW)
- tconf = torch.zeros(nB, nA, nH, nW)
- tcls = torch.zeros(nB, nA, nH, nW)
-
- nAnchors = nA * nH * nW
- nPixels = nH * nW
- for b in range(nB):
- cur_pred_boxes = pred_boxes[b * nAnchors:(b + 1) * nAnchors].t()
- cur_ious = torch.zeros(nAnchors)
- for t in range(50):
- if target[b][t * 5 + 1] == 0:
- break
- gx = target[b][t * 5 + 1] * nW
- gy = target[b][t * 5 + 2] * nH
- gw = target[b][t * 5 + 3] * nW
- gh = target[b][t * 5 + 4] * nH
- cur_gt_boxes = torch.FloatTensor([gx, gy, gw, gh]).repeat(nAnchors, 1).t()
- cur_ious = torch.max(cur_ious, bbox_ious(cur_pred_boxes, cur_gt_boxes, x1y1x2y2=False))
- conf_mask[b][cur_ious > sil_thresh] = 0
- if seen < 12800:
- if anchor_step == 4:
- tx = torch.FloatTensor(anchors).view(nA, anchor_step).index_select(1, torch.LongTensor([2])).view(1, nA, 1,
- 1).repeat(
- nB, 1, nH, nW)
- ty = torch.FloatTensor(anchors).view(num_anchors, anchor_step).index_select(1, torch.LongTensor([2])).view(
- 1, nA, 1, 1).repeat(nB, 1, nH, nW)
- else:
- tx.fill_(0.5)
- ty.fill_(0.5)
- tw.zero_()
- th.zero_()
- coord_mask.fill_(1)
-
- nGT = 0
- nCorrect = 0
- for b in range(nB):
- for t in range(50):
- if target[b][t * 5 + 1] == 0:
- break
- nGT = nGT + 1
- best_iou = 0.0
- best_n = -1
- min_dist = 10000
- gx = target[b][t * 5 + 1] * nW
- gy = target[b][t * 5 + 2] * nH
- gi = int(gx)
- gj = int(gy)
- gw = target[b][t * 5 + 3] * nW
- gh = target[b][t * 5 + 4] * nH
- gt_box = [0, 0, gw, gh]
- for n in range(nA):
- aw = anchors[anchor_step * n]
- ah = anchors[anchor_step * n + 1]
- anchor_box = [0, 0, aw, ah]
- iou = bbox_iou(anchor_box, gt_box, x1y1x2y2=False)
- if anchor_step == 4:
- ax = anchors[anchor_step * n + 2]
- ay = anchors[anchor_step * n + 3]
- dist = pow(((gi + ax) - gx), 2) + pow(((gj + ay) - gy), 2)
- if iou > best_iou:
- best_iou = iou
- best_n = n
- elif anchor_step == 4 and iou == best_iou and dist < min_dist:
- best_iou = iou
- best_n = n
- min_dist = dist
-
- gt_box = [gx, gy, gw, gh]
- pred_box = pred_boxes[b * nAnchors + best_n * nPixels + gj * nW + gi]
-
- coord_mask[b][best_n][gj][gi] = 1
- cls_mask[b][best_n][gj][gi] = 1
- conf_mask[b][best_n][gj][gi] = object_scale
- tx[b][best_n][gj][gi] = target[b][t * 5 + 1] * nW - gi
- ty[b][best_n][gj][gi] = target[b][t * 5 + 2] * nH - gj
- tw[b][best_n][gj][gi] = math.log(gw / anchors[anchor_step * best_n])
- th[b][best_n][gj][gi] = math.log(gh / anchors[anchor_step * best_n + 1])
- iou = bbox_iou(gt_box, pred_box, x1y1x2y2=False) # best_iou
- tconf[b][best_n][gj][gi] = iou
- tcls[b][best_n][gj][gi] = target[b][t * 5]
- if iou > 0.5:
- nCorrect = nCorrect + 1
-
- return nGT, nCorrect, coord_mask, conf_mask, cls_mask, tx, ty, tw, th, tconf, tcls
-
-
-class RegionLoss(nn.Module):
- def __init__(self, num_classes=0, anchors=[], num_anchors=1):
- super(RegionLoss, self).__init__()
- self.num_classes = num_classes
- self.anchors = anchors
- self.num_anchors = num_anchors
-        self.anchor_step = len(anchors) // num_anchors
- self.coord_scale = 1
- self.noobject_scale = 1
- self.object_scale = 5
- self.class_scale = 1
- self.thresh = 0.6
- self.seen = 0
-
- def forward(self, output, target):
- # output : BxAs*(4+1+num_classes)*H*W
- t0 = time.time()
- nB = output.data.size(0)
- nA = self.num_anchors
- nC = self.num_classes
- nH = output.data.size(2)
- nW = output.data.size(3)
-
- output = output.view(nB, nA, (5 + nC), nH, nW)
- x = F.sigmoid(output.index_select(2, Variable(torch.cuda.LongTensor([0]))).view(nB, nA, nH, nW))
- y = F.sigmoid(output.index_select(2, Variable(torch.cuda.LongTensor([1]))).view(nB, nA, nH, nW))
- w = output.index_select(2, Variable(torch.cuda.LongTensor([2]))).view(nB, nA, nH, nW)
- h = output.index_select(2, Variable(torch.cuda.LongTensor([3]))).view(nB, nA, nH, nW)
- conf = F.sigmoid(output.index_select(2, Variable(torch.cuda.LongTensor([4]))).view(nB, nA, nH, nW))
- cls = output.index_select(2, Variable(torch.linspace(5, 5 + nC - 1, nC).long().cuda()))
- cls = cls.view(nB * nA, nC, nH * nW).transpose(1, 2).contiguous().view(nB * nA * nH * nW, nC)
- t1 = time.time()
-
- pred_boxes = torch.cuda.FloatTensor(4, nB * nA * nH * nW)
- grid_x = torch.linspace(0, nW - 1, nW).repeat(nH, 1).repeat(nB * nA, 1, 1).view(nB * nA * nH * nW).cuda()
- grid_y = torch.linspace(0, nH - 1, nH).repeat(nW, 1).t().repeat(nB * nA, 1, 1).view(nB * nA * nH * nW).cuda()
- anchor_w = torch.Tensor(self.anchors).view(nA, self.anchor_step).index_select(1, torch.LongTensor([0])).cuda()
- anchor_h = torch.Tensor(self.anchors).view(nA, self.anchor_step).index_select(1, torch.LongTensor([1])).cuda()
- anchor_w = anchor_w.repeat(nB, 1).repeat(1, 1, nH * nW).view(nB * nA * nH * nW)
- anchor_h = anchor_h.repeat(nB, 1).repeat(1, 1, nH * nW).view(nB * nA * nH * nW)
- pred_boxes[0] = x.data + grid_x
- pred_boxes[1] = y.data + grid_y
- pred_boxes[2] = torch.exp(w.data) * anchor_w
- pred_boxes[3] = torch.exp(h.data) * anchor_h
- pred_boxes = convert2cpu(pred_boxes.transpose(0, 1).contiguous().view(-1, 4))
- t2 = time.time()
-
- nGT, nCorrect, coord_mask, conf_mask, cls_mask, tx, ty, tw, th, tconf, tcls = build_targets(pred_boxes,
- target.data,
- self.anchors, nA,
- nC, \
- nH, nW,
- self.noobject_scale,
- self.object_scale,
- self.thresh,
- self.seen)
- cls_mask = (cls_mask == 1)
- nProposals = int((conf > 0.25).sum().data[0])
-
- tx = Variable(tx.cuda())
- ty = Variable(ty.cuda())
- tw = Variable(tw.cuda())
- th = Variable(th.cuda())
- tconf = Variable(tconf.cuda())
- tcls = Variable(tcls.view(-1)[cls_mask].long().cuda())
-
- coord_mask = Variable(coord_mask.cuda())
- conf_mask = Variable(conf_mask.cuda().sqrt())
- cls_mask = Variable(cls_mask.view(-1, 1).repeat(1, nC).cuda())
- cls = cls[cls_mask].view(-1, nC)
-
- t3 = time.time()
-
- loss_x = self.coord_scale * nn.MSELoss(reduction='sum')(x * coord_mask, tx * coord_mask) / 2.0
- loss_y = self.coord_scale * nn.MSELoss(reduction='sum')(y * coord_mask, ty * coord_mask) / 2.0
- loss_w = self.coord_scale * nn.MSELoss(reduction='sum')(w * coord_mask, tw * coord_mask) / 2.0
- loss_h = self.coord_scale * nn.MSELoss(reduction='sum')(h * coord_mask, th * coord_mask) / 2.0
- loss_conf = nn.MSELoss(reduction='sum')(conf * conf_mask, tconf * conf_mask) / 2.0
- loss_cls = self.class_scale * nn.CrossEntropyLoss(reduction='sum')(cls, tcls)
- loss = loss_x + loss_y + loss_w + loss_h + loss_conf + loss_cls
- t4 = time.time()
- if False:
- print('-----------------------------------')
- print(' activation : %f' % (t1 - t0))
- print(' create pred_boxes : %f' % (t2 - t1))
- print(' build targets : %f' % (t3 - t2))
- print(' create loss : %f' % (t4 - t3))
- print(' total : %f' % (t4 - t0))
- print('%d: nGT %d, recall %d, proposals %d, loss: x %f, y %f, w %f, h %f, conf %f, cls %f, total %f' % (
- self.seen, nGT, nCorrect, nProposals, loss_x.data[0], loss_y.data[0], loss_w.data[0], loss_h.data[0],
- loss_conf.data[0], loss_cls.data[0], loss.data[0]))
- return loss
diff --git a/legacy/lasr_object_detection_yolo/src/lasr_object_detection_yolo/darknet_pytorch/torch_utils.py b/legacy/lasr_object_detection_yolo/src/lasr_object_detection_yolo/darknet_pytorch/torch_utils.py
deleted file mode 100644
index 8a9b499ad..000000000
--- a/legacy/lasr_object_detection_yolo/src/lasr_object_detection_yolo/darknet_pytorch/torch_utils.py
+++ /dev/null
@@ -1,105 +0,0 @@
-import sys
-import os
-import time
-import math
-import torch
-import numpy as np
-from torch.autograd import Variable
-
-import itertools
-import struct # get_image_size
-import imghdr # get_image_size
-
-from darknet_pytorch import utils
-
-
-def bbox_ious(boxes1, boxes2, x1y1x2y2=True):
- if x1y1x2y2:
- mx = torch.min(boxes1[0], boxes2[0])
- Mx = torch.max(boxes1[2], boxes2[2])
- my = torch.min(boxes1[1], boxes2[1])
- My = torch.max(boxes1[3], boxes2[3])
- w1 = boxes1[2] - boxes1[0]
- h1 = boxes1[3] - boxes1[1]
- w2 = boxes2[2] - boxes2[0]
- h2 = boxes2[3] - boxes2[1]
- else:
- mx = torch.min(boxes1[0] - boxes1[2] / 2.0, boxes2[0] - boxes2[2] / 2.0)
- Mx = torch.max(boxes1[0] + boxes1[2] / 2.0, boxes2[0] + boxes2[2] / 2.0)
- my = torch.min(boxes1[1] - boxes1[3] / 2.0, boxes2[1] - boxes2[3] / 2.0)
- My = torch.max(boxes1[1] + boxes1[3] / 2.0, boxes2[1] + boxes2[3] / 2.0)
- w1 = boxes1[2]
- h1 = boxes1[3]
- w2 = boxes2[2]
- h2 = boxes2[3]
- uw = Mx - mx
- uh = My - my
- cw = w1 + w2 - uw
- ch = h1 + h2 - uh
- mask = ((cw <= 0) + (ch <= 0) > 0)
- area1 = w1 * h1
- area2 = w2 * h2
- carea = cw * ch
- carea[mask] = 0
- uarea = area1 + area2 - carea
- return carea / uarea
-
-
-def get_region_boxes(boxes_and_confs):
-
- # print('Getting boxes from boxes and confs ...')
-
- boxes_list = []
- confs_list = []
-
- for item in boxes_and_confs:
- boxes_list.append(item[0])
- confs_list.append(item[1])
-
- # boxes: [batch, num1 + num2 + num3, 1, 4]
- # confs: [batch, num1 + num2 + num3, num_classes]
- boxes = torch.cat(boxes_list, dim=1)
- confs = torch.cat(confs_list, dim=1)
-
- return [boxes, confs]
-
-
-def convert2cpu(gpu_matrix):
- return torch.FloatTensor(gpu_matrix.size()).copy_(gpu_matrix)
-
-
-def convert2cpu_long(gpu_matrix):
- return torch.LongTensor(gpu_matrix.size()).copy_(gpu_matrix)
-
-
-
-def do_detect(model, img, conf_thresh, nms_thresh, use_cuda=1):
- model.eval()
- with torch.no_grad():
- t0 = time.time()
-
- if type(img) == np.ndarray and len(img.shape) == 3: # cv2 image
- img = torch.from_numpy(img.transpose(2, 0, 1)).float().div(255.0).unsqueeze(0)
- elif type(img) == np.ndarray and len(img.shape) == 4:
- img = torch.from_numpy(img.transpose(0, 3, 1, 2)).float().div(255.0)
- else:
-            print("unknown image type")
- exit(-1)
-
- if use_cuda:
- img = img.cuda()
- img = torch.autograd.Variable(img)
-
- t1 = time.time()
-
- output = model(img)
-
- t2 = time.time()
-
- print('-----------------------------------')
- print(' Preprocess : %f' % (t1 - t0))
- print(' Model Inference : %f' % (t2 - t1))
- print('-----------------------------------')
-
- return utils.post_processing(img, conf_thresh, nms_thresh, output)
-
diff --git a/legacy/lasr_object_detection_yolo/src/lasr_object_detection_yolo/darknet_pytorch/utils.py b/legacy/lasr_object_detection_yolo/src/lasr_object_detection_yolo/darknet_pytorch/utils.py
deleted file mode 100644
index a42e62642..000000000
--- a/legacy/lasr_object_detection_yolo/src/lasr_object_detection_yolo/darknet_pytorch/utils.py
+++ /dev/null
@@ -1,239 +0,0 @@
-import sys
-import os
-import time
-import math
-import numpy as np
-
-import itertools
-import struct # get_image_size
-import imghdr # get_image_size
-
-
-def sigmoid(x):
- return 1.0 / (np.exp(-x) + 1.)
-
-
-def softmax(x):
- x = np.exp(x - np.expand_dims(np.max(x, axis=1), axis=1))
- x = x / np.expand_dims(x.sum(axis=1), axis=1)
- return x
-
-
-def bbox_iou(box1, box2, x1y1x2y2=True):
-
- # print('iou box1:', box1)
- # print('iou box2:', box2)
-
- if x1y1x2y2:
- mx = min(box1[0], box2[0])
- Mx = max(box1[2], box2[2])
- my = min(box1[1], box2[1])
- My = max(box1[3], box2[3])
- w1 = box1[2] - box1[0]
- h1 = box1[3] - box1[1]
- w2 = box2[2] - box2[0]
- h2 = box2[3] - box2[1]
- else:
- w1 = box1[2]
- h1 = box1[3]
- w2 = box2[2]
- h2 = box2[3]
-
- mx = min(box1[0], box2[0])
- Mx = max(box1[0] + w1, box2[0] + w2)
- my = min(box1[1], box2[1])
- My = max(box1[1] + h1, box2[1] + h2)
- uw = Mx - mx
- uh = My - my
- cw = w1 + w2 - uw
- ch = h1 + h2 - uh
- carea = 0
- if cw <= 0 or ch <= 0:
- return 0.0
-
- area1 = w1 * h1
- area2 = w2 * h2
- carea = cw * ch
- uarea = area1 + area2 - carea
- return carea / uarea
-
-
-def nms_cpu(boxes, confs, nms_thresh=0.5, min_mode=False):
- # print(boxes.shape)
- x1 = boxes[:, 0]
- y1 = boxes[:, 1]
- x2 = boxes[:, 2]
- y2 = boxes[:, 3]
-
- areas = (x2 - x1) * (y2 - y1)
- order = confs.argsort()[::-1]
-
- keep = []
- while order.size > 0:
- idx_self = order[0]
- idx_other = order[1:]
-
- keep.append(idx_self)
-
- xx1 = np.maximum(x1[idx_self], x1[idx_other])
- yy1 = np.maximum(y1[idx_self], y1[idx_other])
- xx2 = np.minimum(x2[idx_self], x2[idx_other])
- yy2 = np.minimum(y2[idx_self], y2[idx_other])
-
- w = np.maximum(0.0, xx2 - xx1)
- h = np.maximum(0.0, yy2 - yy1)
- inter = w * h
-
- if min_mode:
- over = inter / np.minimum(areas[order[0]], areas[order[1:]])
- else:
- over = inter / (areas[order[0]] + areas[order[1:]] - inter)
-
- inds = np.where(over <= nms_thresh)[0]
- order = order[inds + 1]
-
- return np.array(keep)
-
-
-
-def plot_boxes_cv2(img, boxes, savename=None, class_names=None, color=None):
- import cv2
- img = np.copy(img)
- colors = np.array([[1, 0, 1], [0, 0, 1], [0, 1, 1], [0, 1, 0], [1, 1, 0], [1, 0, 0]], dtype=np.float32)
-
- def get_color(c, x, max_val):
- ratio = float(x) / max_val * 5
- i = int(math.floor(ratio))
- j = int(math.ceil(ratio))
- ratio = ratio - i
- r = (1 - ratio) * colors[i][c] + ratio * colors[j][c]
- return int(r * 255)
-
- width = img.shape[1]
- height = img.shape[0]
- for i in range(len(boxes)):
- box = boxes[i]
- x1 = int(box[0] * width)
- y1 = int(box[1] * height)
- x2 = int(box[2] * width)
- y2 = int(box[3] * height)
- bbox_thick = int(0.6 * (height + width) / 600)
- if color:
- rgb = color
- else:
- rgb = (255, 0, 0)
- if len(box) >= 7 and class_names:
- cls_conf = box[5]
- cls_id = box[6]
- print('%s: %f' % (class_names[cls_id], cls_conf))
- classes = len(class_names)
- offset = cls_id * 123457 % classes
- red = get_color(2, offset, classes)
- green = get_color(1, offset, classes)
- blue = get_color(0, offset, classes)
- if color is None:
- rgb = (red, green, blue)
- msg = str(class_names[cls_id])+" "+str(round(cls_conf,3))
- t_size = cv2.getTextSize(msg, 0, 0.7, thickness=bbox_thick // 2)[0]
- c1, c2 = (x1,y1), (x2, y2)
- c3 = (c1[0] + t_size[0], c1[1] - t_size[1] - 3)
- cv2.rectangle(img, (x1,y1), (np.float32(c3[0]), np.float32(c3[1])), rgb, -1)
- img = cv2.putText(img, msg, (c1[0], np.float32(c1[1] - 2)), cv2.FONT_HERSHEY_SIMPLEX,0.7, (0,0,0), bbox_thick//2,lineType=cv2.LINE_AA)
-
- img = cv2.rectangle(img, (x1, y1), (x2, y2), rgb, bbox_thick)
- if savename:
- print("save plot results to %s" % savename)
- cv2.imwrite(savename, img)
- return img
-
-
-def read_truths(lab_path):
- if not os.path.exists(lab_path):
- return np.array([])
- if os.path.getsize(lab_path):
- truths = np.loadtxt(lab_path)
-        truths = truths.reshape(truths.size // 5, 5)  # to avoid single truth problem
- return truths
- else:
- return np.array([])
-
-
-def load_class_names(namesfile):
- class_names = []
- with open(namesfile, 'r') as fp:
- lines = fp.readlines()
- for line in lines:
- line = line.rstrip()
- class_names.append(line)
- return class_names
-
-
-
-def post_processing(img, conf_thresh, nms_thresh, output):
-
- # anchors = [12, 16, 19, 36, 40, 28, 36, 75, 76, 55, 72, 146, 142, 110, 192, 243, 459, 401]
- # num_anchors = 9
- # anchor_masks = [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
- # strides = [8, 16, 32]
- # anchor_step = len(anchors) // num_anchors
-
- # [batch, num, 1, 4]
- box_array = output[0]
- # [batch, num, num_classes]
- confs = output[1]
-
- t1 = time.time()
-
- if type(box_array).__name__ != 'ndarray':
- box_array = box_array.cpu().detach().numpy()
- confs = confs.cpu().detach().numpy()
-
- num_classes = confs.shape[2]
-
- # [batch, num, 4]
- box_array = box_array[:, :, 0]
-
- # [batch, num, num_classes] --> [batch, num]
- max_conf = np.max(confs, axis=2)
- max_id = np.argmax(confs, axis=2)
-
- t2 = time.time()
-
- bboxes_batch = []
- for i in range(box_array.shape[0]):
-
- argwhere = max_conf[i] > conf_thresh
- l_box_array = box_array[i, argwhere, :]
- l_max_conf = max_conf[i, argwhere]
- l_max_id = max_id[i, argwhere]
-
- bboxes = []
- # nms for each class
- for j in range(num_classes):
-
- cls_argwhere = l_max_id == j
- ll_box_array = l_box_array[cls_argwhere, :]
- ll_max_conf = l_max_conf[cls_argwhere]
- ll_max_id = l_max_id[cls_argwhere]
-
- keep = nms_cpu(ll_box_array, ll_max_conf, nms_thresh)
-
- if (keep.size > 0):
- ll_box_array = ll_box_array[keep, :]
- ll_max_conf = ll_max_conf[keep]
- ll_max_id = ll_max_id[keep]
-
- for k in range(ll_box_array.shape[0]):
- bboxes.append([ll_box_array[k, 0], ll_box_array[k, 1], ll_box_array[k, 2], ll_box_array[k, 3], ll_max_conf[k], ll_max_conf[k], ll_max_id[k]])
-
- bboxes_batch.append(bboxes)
-
- t3 = time.time()
-
- print('-----------------------------------')
- print(' max and argmax : %f' % (t2 - t1))
- print(' nms : %f' % (t3 - t2))
- print('Post processing total : %f' % (t3 - t1))
- print('-----------------------------------')
-
- return bboxes_batch
diff --git a/legacy/lasr_object_detection_yolo/src/lasr_object_detection_yolo/darknet_pytorch/yolo_layer.py b/legacy/lasr_object_detection_yolo/src/lasr_object_detection_yolo/darknet_pytorch/yolo_layer.py
deleted file mode 100644
index 1ea21566b..000000000
--- a/legacy/lasr_object_detection_yolo/src/lasr_object_detection_yolo/darknet_pytorch/yolo_layer.py
+++ /dev/null
@@ -1,322 +0,0 @@
-import torch.nn as nn
-import torch.nn.functional as F
-from darknet_pytorch.torch_utils import *
-
-def yolo_forward(output, conf_thresh, num_classes, anchors, num_anchors, scale_x_y, only_objectness=1,
- validation=False):
- # Output would be invalid if it does not satisfy this assert
- # assert (output.size(1) == (5 + num_classes) * num_anchors)
-
- # print(output.size())
-
- # Slice the second dimension (channel) of output into:
- # [ 2, 2, 1, num_classes, 2, 2, 1, num_classes, 2, 2, 1, num_classes ]
- # And then into
- # bxy = [ 6 ] bwh = [ 6 ] det_conf = [ 3 ] cls_conf = [ num_classes * 3 ]
- batch = output.size(0)
- H = output.size(2)
- W = output.size(3)
-
- bxy_list = []
- bwh_list = []
- det_confs_list = []
- cls_confs_list = []
-
- for i in range(num_anchors):
- begin = i * (5 + num_classes)
- end = (i + 1) * (5 + num_classes)
-
- bxy_list.append(output[:, begin : begin + 2])
- bwh_list.append(output[:, begin + 2 : begin + 4])
- det_confs_list.append(output[:, begin + 4 : begin + 5])
- cls_confs_list.append(output[:, begin + 5 : end])
-
- # Shape: [batch, num_anchors * 2, H, W]
- bxy = torch.cat(bxy_list, dim=1)
- # Shape: [batch, num_anchors * 2, H, W]
- bwh = torch.cat(bwh_list, dim=1)
-
- # Shape: [batch, num_anchors, H, W]
- det_confs = torch.cat(det_confs_list, dim=1)
- # Shape: [batch, num_anchors * H * W]
- det_confs = det_confs.view(batch, num_anchors * H * W)
-
- # Shape: [batch, num_anchors * num_classes, H, W]
- cls_confs = torch.cat(cls_confs_list, dim=1)
- # Shape: [batch, num_anchors, num_classes, H * W]
- cls_confs = cls_confs.view(batch, num_anchors, num_classes, H * W)
- # Shape: [batch, num_anchors, num_classes, H * W] --> [batch, num_anchors * H * W, num_classes]
- cls_confs = cls_confs.permute(0, 1, 3, 2).reshape(batch, num_anchors * H * W, num_classes)
-
- # Apply sigmoid(), exp() and softmax() to slices
- #
- bxy = torch.sigmoid(bxy) * scale_x_y - 0.5 * (scale_x_y - 1)
- bwh = torch.exp(bwh)
- det_confs = torch.sigmoid(det_confs)
- cls_confs = torch.sigmoid(cls_confs)
-
- # Prepare C-x, C-y, P-w, P-h (None of them are torch related)
- grid_x = np.expand_dims(np.expand_dims(np.expand_dims(np.linspace(0, W - 1, W), axis=0).repeat(H, 0), axis=0), axis=0)
- grid_y = np.expand_dims(np.expand_dims(np.expand_dims(np.linspace(0, H - 1, H), axis=1).repeat(W, 1), axis=0), axis=0)
- # grid_x = torch.linspace(0, W - 1, W).reshape(1, 1, 1, W).repeat(1, 1, H, 1)
- # grid_y = torch.linspace(0, H - 1, H).reshape(1, 1, H, 1).repeat(1, 1, 1, W)
-
- anchor_w = []
- anchor_h = []
- for i in range(num_anchors):
- anchor_w.append(anchors[i * 2])
- anchor_h.append(anchors[i * 2 + 1])
-
- device = None
- cuda_check = output.is_cuda
- if cuda_check:
- device = output.get_device()
-
- bx_list = []
- by_list = []
- bw_list = []
- bh_list = []
-
- # Apply C-x, C-y, P-w, P-h
- for i in range(num_anchors):
- ii = i * 2
- # Shape: [batch, 1, H, W]
- bx = bxy[:, ii : ii + 1] + torch.tensor(grid_x, device=device, dtype=torch.float32) # grid_x.to(device=device, dtype=torch.float32)
- # Shape: [batch, 1, H, W]
- by = bxy[:, ii + 1 : ii + 2] + torch.tensor(grid_y, device=device, dtype=torch.float32) # grid_y.to(device=device, dtype=torch.float32)
- # Shape: [batch, 1, H, W]
- bw = bwh[:, ii : ii + 1] * anchor_w[i]
- # Shape: [batch, 1, H, W]
- bh = bwh[:, ii + 1 : ii + 2] * anchor_h[i]
-
- bx_list.append(bx)
- by_list.append(by)
- bw_list.append(bw)
- bh_list.append(bh)
-
-
- ########################################
- # Figure out bboxes from slices #
- ########################################
-
- # Shape: [batch, num_anchors, H, W]
- bx = torch.cat(bx_list, dim=1)
- # Shape: [batch, num_anchors, H, W]
- by = torch.cat(by_list, dim=1)
- # Shape: [batch, num_anchors, H, W]
- bw = torch.cat(bw_list, dim=1)
- # Shape: [batch, num_anchors, H, W]
- bh = torch.cat(bh_list, dim=1)
-
- # Shape: [batch, 2 * num_anchors, H, W]
- bx_bw = torch.cat((bx, bw), dim=1)
- # Shape: [batch, 2 * num_anchors, H, W]
- by_bh = torch.cat((by, bh), dim=1)
-
- # normalize coordinates to [0, 1]
- bx_bw /= W
- by_bh /= H
-
- # Shape: [batch, num_anchors * H * W, 1]
- bx = bx_bw[:, :num_anchors].view(batch, num_anchors * H * W, 1)
- by = by_bh[:, :num_anchors].view(batch, num_anchors * H * W, 1)
- bw = bx_bw[:, num_anchors:].view(batch, num_anchors * H * W, 1)
- bh = by_bh[:, num_anchors:].view(batch, num_anchors * H * W, 1)
-
- bx1 = bx - bw * 0.5
- by1 = by - bh * 0.5
- bx2 = bx1 + bw
- by2 = by1 + bh
-
- # Shape: [batch, num_anchors * h * w, 4] -> [batch, num_anchors * h * w, 1, 4]
- boxes = torch.cat((bx1, by1, bx2, by2), dim=2).view(batch, num_anchors * H * W, 1, 4)
- # boxes = boxes.repeat(1, 1, num_classes, 1)
-
- # boxes: [batch, num_anchors * H * W, 1, 4]
- # cls_confs: [batch, num_anchors * H * W, num_classes]
- # det_confs: [batch, num_anchors * H * W]
-
- det_confs = det_confs.view(batch, num_anchors * H * W, 1)
- confs = cls_confs * det_confs
-
- # boxes: [batch, num_anchors * H * W, 1, 4]
- # confs: [batch, num_anchors * H * W, num_classes]
-
- return boxes, confs
-
-
-def yolo_forward_dynamic(output, conf_thresh, num_classes, anchors, num_anchors, scale_x_y, only_objectness=1,
- validation=False):
- # Output would be invalid if it does not satisfy this assert
- # assert (output.size(1) == (5 + num_classes) * num_anchors)
-
- # print(output.size())
-
- # Slice the second dimension (channel) of output into:
- # [ 2, 2, 1, num_classes, 2, 2, 1, num_classes, 2, 2, 1, num_classes ]
- # And then into
- # bxy = [ 6 ] bwh = [ 6 ] det_conf = [ 3 ] cls_conf = [ num_classes * 3 ]
- # batch = output.size(0)
- # H = output.size(2)
- # W = output.size(3)
-
- bxy_list = []
- bwh_list = []
- det_confs_list = []
- cls_confs_list = []
-
- for i in range(num_anchors):
- begin = i * (5 + num_classes)
- end = (i + 1) * (5 + num_classes)
-
- bxy_list.append(output[:, begin : begin + 2])
- bwh_list.append(output[:, begin + 2 : begin + 4])
- det_confs_list.append(output[:, begin + 4 : begin + 5])
- cls_confs_list.append(output[:, begin + 5 : end])
-
- # Shape: [batch, num_anchors * 2, H, W]
- bxy = torch.cat(bxy_list, dim=1)
- # Shape: [batch, num_anchors * 2, H, W]
- bwh = torch.cat(bwh_list, dim=1)
-
- # Shape: [batch, num_anchors, H, W]
- det_confs = torch.cat(det_confs_list, dim=1)
- # Shape: [batch, num_anchors * H * W]
- det_confs = det_confs.view(output.size(0), num_anchors * output.size(2) * output.size(3))
-
- # Shape: [batch, num_anchors * num_classes, H, W]
- cls_confs = torch.cat(cls_confs_list, dim=1)
- # Shape: [batch, num_anchors, num_classes, H * W]
- cls_confs = cls_confs.view(output.size(0), num_anchors, num_classes, output.size(2) * output.size(3))
- # Shape: [batch, num_anchors, num_classes, H * W] --> [batch, num_anchors * H * W, num_classes]
- cls_confs = cls_confs.permute(0, 1, 3, 2).reshape(output.size(0), num_anchors * output.size(2) * output.size(3), num_classes)
-
- # Apply sigmoid(), exp() and softmax() to slices
- #
- bxy = torch.sigmoid(bxy) * scale_x_y - 0.5 * (scale_x_y - 1)
- bwh = torch.exp(bwh)
- det_confs = torch.sigmoid(det_confs)
- cls_confs = torch.sigmoid(cls_confs)
-
- # Prepare C-x, C-y, P-w, P-h (None of them are torch related)
- grid_x = np.expand_dims(np.expand_dims(np.expand_dims(np.linspace(0, output.size(3) - 1, output.size(3)), axis=0).repeat(output.size(2), 0), axis=0), axis=0)
- grid_y = np.expand_dims(np.expand_dims(np.expand_dims(np.linspace(0, output.size(2) - 1, output.size(2)), axis=1).repeat(output.size(3), 1), axis=0), axis=0)
- # grid_x = torch.linspace(0, W - 1, W).reshape(1, 1, 1, W).repeat(1, 1, H, 1)
- # grid_y = torch.linspace(0, H - 1, H).reshape(1, 1, H, 1).repeat(1, 1, 1, W)
-
- anchor_w = []
- anchor_h = []
- for i in range(num_anchors):
- anchor_w.append(anchors[i * 2])
- anchor_h.append(anchors[i * 2 + 1])
-
- device = None
- cuda_check = output.is_cuda
- if cuda_check:
- device = output.get_device()
-
- bx_list = []
- by_list = []
- bw_list = []
- bh_list = []
-
- # Apply C-x, C-y, P-w, P-h
- for i in range(num_anchors):
- ii = i * 2
- # Shape: [batch, 1, H, W]
- bx = bxy[:, ii : ii + 1] + torch.tensor(grid_x, device=device, dtype=torch.float32) # grid_x.to(device=device, dtype=torch.float32)
- # Shape: [batch, 1, H, W]
- by = bxy[:, ii + 1 : ii + 2] + torch.tensor(grid_y, device=device, dtype=torch.float32) # grid_y.to(device=device, dtype=torch.float32)
- # Shape: [batch, 1, H, W]
- bw = bwh[:, ii : ii + 1] * anchor_w[i]
- # Shape: [batch, 1, H, W]
- bh = bwh[:, ii + 1 : ii + 2] * anchor_h[i]
-
- bx_list.append(bx)
- by_list.append(by)
- bw_list.append(bw)
- bh_list.append(bh)
-
-
- ########################################
- # Figure out bboxes from slices #
- ########################################
-
- # Shape: [batch, num_anchors, H, W]
- bx = torch.cat(bx_list, dim=1)
- # Shape: [batch, num_anchors, H, W]
- by = torch.cat(by_list, dim=1)
- # Shape: [batch, num_anchors, H, W]
- bw = torch.cat(bw_list, dim=1)
- # Shape: [batch, num_anchors, H, W]
- bh = torch.cat(bh_list, dim=1)
-
- # Shape: [batch, 2 * num_anchors, H, W]
- bx_bw = torch.cat((bx, bw), dim=1)
- # Shape: [batch, 2 * num_anchors, H, W]
- by_bh = torch.cat((by, bh), dim=1)
-
- # normalize coordinates to [0, 1]
- bx_bw /= output.size(3)
- by_bh /= output.size(2)
-
- # Shape: [batch, num_anchors * H * W, 1]
- bx = bx_bw[:, :num_anchors].view(output.size(0), num_anchors * output.size(2) * output.size(3), 1)
- by = by_bh[:, :num_anchors].view(output.size(0), num_anchors * output.size(2) * output.size(3), 1)
- bw = bx_bw[:, num_anchors:].view(output.size(0), num_anchors * output.size(2) * output.size(3), 1)
- bh = by_bh[:, num_anchors:].view(output.size(0), num_anchors * output.size(2) * output.size(3), 1)
-
- bx1 = bx - bw * 0.5
- by1 = by - bh * 0.5
- bx2 = bx1 + bw
- by2 = by1 + bh
-
- # Shape: [batch, num_anchors * h * w, 4] -> [batch, num_anchors * h * w, 1, 4]
- boxes = torch.cat((bx1, by1, bx2, by2), dim=2).view(output.size(0), num_anchors * output.size(2) * output.size(3), 1, 4)
- # boxes = boxes.repeat(1, 1, num_classes, 1)
-
- # boxes: [batch, num_anchors * H * W, 1, 4]
- # cls_confs: [batch, num_anchors * H * W, num_classes]
- # det_confs: [batch, num_anchors * H * W]
-
- det_confs = det_confs.view(output.size(0), num_anchors * output.size(2) * output.size(3), 1)
- confs = cls_confs * det_confs
-
- # boxes: [batch, num_anchors * H * W, 1, 4]
- # confs: [batch, num_anchors * H * W, num_classes]
-
- return boxes, confs
-
-class YoloLayer(nn.Module):
- ''' Yolo layer
- model_out: during inference, selects whether post-processing is done inside or outside the model
- True: post-processing is done outside the model
- '''
- def __init__(self, anchor_mask=[], num_classes=0, anchors=[], num_anchors=1, stride=32, model_out=False):
- super(YoloLayer, self).__init__()
- self.anchor_mask = anchor_mask
- self.num_classes = num_classes
- self.anchors = anchors
- self.num_anchors = num_anchors
- self.anchor_step = len(anchors) // num_anchors
- self.coord_scale = 1
- self.noobject_scale = 1
- self.object_scale = 5
- self.class_scale = 1
- self.thresh = 0.6
- self.stride = stride
- self.seen = 0
- self.scale_x_y = 1
-
- self.model_out = model_out
-
- def forward(self, output, target=None):
- if self.training:
- return output
- masked_anchors = []
- for m in self.anchor_mask:
- masked_anchors += self.anchors[m * self.anchor_step:(m + 1) * self.anchor_step]
- masked_anchors = [anchor / self.stride for anchor in masked_anchors]
-
- return yolo_forward_dynamic(output, self.thresh, self.num_classes, masked_anchors, len(self.anchor_mask), scale_x_y=self.scale_x_y)
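-
- # A minimal usage sketch (not part of the original module), assuming a raw YOLO head
- # tensor of shape [batch, len(anchor_mask) * (5 + num_classes), H, W] and the
- # YOLOv4-tiny anchors used elsewhere in this package; it only illustrates what
- # forward() returns in eval mode:
- #
- #   layer = YoloLayer(anchor_mask=[3, 4, 5], num_classes=80,
- #                     anchors=[10, 14, 23, 27, 37, 58, 81, 82, 135, 169, 344, 319],
- #                     num_anchors=6, stride=32)
- #   layer.eval()
- #   dummy = torch.randn(1, 3 * (5 + 80), 13, 13)
- #   boxes, confs = layer(dummy)
- #   # boxes: [1, 3 * 13 * 13, 1, 4], normalised (x1, y1, x2, y2)
- #   # confs: [1, 3 * 13 * 13, 80], class confidence * objectness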
-
diff --git a/legacy/lasr_object_detection_yolo/src/lasr_object_detection_yolo/detect_objects.py b/legacy/lasr_object_detection_yolo/src/lasr_object_detection_yolo/detect_objects.py
deleted file mode 100644
index c799216d1..000000000
--- a/legacy/lasr_object_detection_yolo/src/lasr_object_detection_yolo/detect_objects.py
+++ /dev/null
@@ -1,46 +0,0 @@
-#!/usr/bin/env python3
-
-import rospy
-from lasr_vision_msgs.srv import YoloDetection, YoloDetectionRequest
-from sensor_msgs.msg import Image
-
-
-def detect_objects(object_names: list, model="coco"):
- """
- Detects the requested object classes in a single RGB image
- """
- rospy.wait_for_service('yolo_object_detection_server/detect_objects')
-
- if not isinstance(object_names, list):
- raise ValueError("please input a list of strings")
-
- try:
- detect_objects = rospy.ServiceProxy('yolo_object_detection_server/detect_objects', YoloDetection)
- image_msg = rospy.wait_for_message('/xtion/rgb/image_raw', Image) # wait for rgb image
-
- req = YoloDetectionRequest()
- req.image_raw = image_msg
- req.dataset = model
- req.confidence = 0.7
- req.nms = 0.3
-
- yolo_resp = detect_objects(req)
- except rospy.ServiceException as e:
- print("Service call failed: %s" % e)
- return []
-
- objects = []
- for obj in yolo_resp.detected_objects:
- if obj.name in object_names:
- objects.append(obj)
- return objects
-
-
-if __name__ == '__main__':
- rospy.init_node("objects_detection", anonymous=True)
- objects = detect_objects(["person", "mug", "phone"])
- for o in objects:
- print("object name: ", o.name)
- print("object confidence: ", o.confidence)
- print("object position: ", o.xywh)
-
diff --git a/legacy/lasr_object_detection_yolo/src/lasr_object_detection_yolo/detect_objects_v8.py b/legacy/lasr_object_detection_yolo/src/lasr_object_detection_yolo/detect_objects_v8.py
deleted file mode 100755
index f71cf2a91..000000000
--- a/legacy/lasr_object_detection_yolo/src/lasr_object_detection_yolo/detect_objects_v8.py
+++ /dev/null
@@ -1,314 +0,0 @@
-#!/usr/bin/env python3
-
-import rospy
-from lasr_vision_msgs.srv import YoloDetection, YoloDetectionRequest
-from sensor_msgs.msg import Image
-from common_math import pcl_msg_to_cv2, seg_to_centroid
-from std_msgs.msg import String
-from geometry_msgs.msg import PointStamped, Point
-from tf_module.srv import TfTransform, TfTransformRequest
-import numpy as np
-from lasr_shapely import LasrShapely
-from cv_bridge3 import CvBridge
-from sensor_msgs.msg import PointCloud2
-import cv2
-# from tiago_controllers.helpers.nav_map_helpers import is_close_to_object, rank
-
-
-
-def detect_objects(object_names: list, confidence=0.25, nms=0.4, model="yolov8n.pt"):
- """
- Detects the requested object classes in a single RGB image using YOLOv8
- """
- rospy.wait_for_service("/yolov8/detect", rospy.Duration(15.0))
-
- if not isinstance(object_names, list):
- raise ValueError("please input a list of strings")
-
- try:
- detect_service = rospy.ServiceProxy('/yolov8/detect', YoloDetection)
-
- if rospy.get_published_topics(namespace='/camera/image_raw'):
- image_msg = rospy.wait_for_message('/camera/image_raw', Image) # wait for rgb image
- elif rospy.get_published_topics(namespace='/xtion/rgb/image_raw'):
- image_msg = rospy.wait_for_message('/xtion/rgb/image_raw', Image)
- elif rospy.get_published_topics(namespace='/usb_cam/image_raw'):
- image_msg = rospy.wait_for_message('/usb_cam/image_raw', Image) # wait for rgb image
- else:
- raise rospy.ROSException("no known camera image topic is currently published")
-
- req = YoloDetectionRequest()
- req.image_raw = image_msg
- req.dataset = model
- req.confidence = confidence
- req.nms = nms
- resp = detect_service(req)
- print(resp)
-
- except rospy.ServiceException as e:
- print("Service call failed: %s" % e)
- return []
-
- objects = []
- for obj in resp.detected_objects:
- if obj.name in object_names:
- objects.append(obj)
- return objects
-
-def estimate_pose(tf, pcl_msg, detection):
- centroid_xyz = seg_to_centroid(pcl_msg, np.array(detection.xyseg))
- centroid = PointStamped()
- centroid.point = Point(*centroid_xyz)
- centroid.header = pcl_msg.header
- tf_req = TfTransformRequest()
- tf_req.target_frame = String("map")
- tf_req.point = centroid
- response = tf(tf_req)
- # print("response")
- # print(response)
- return np.array([response.target_point.point.x, response.target_point.point.y, response.target_point.point.z])
-# def perform_detection(yolo,tf, bridge, shapely, pcl_msg, polygon, filter, model="yolov8n-seg.pt"):
-# cv_im = pcl_msg_to_cv2(pcl_msg)
-# img_msg = bridge.cv2_to_imgmsg(cv_im)
-# detections = yolo(img_msg, model, 0.5, 0.3)
-# rospy.loginfo(detections)
-# detections = [(det, estimate_pose(tf, pcl_msg, det)) for det in detections.detected_objects if
-# det.name in filter]
-# rospy.loginfo(f"All: {[(det.name, pose) for det, pose in detections]}")
-# rospy.loginfo(f"Boundary: {polygon}")
-# satisfied_points = shapely.are_points_in_polygon_2d(polygon, [[pose[0], pose[1]] for (_, pose) in
-# detections]).inside
-# detections = [detections[i] for i in range(0, len(detections)) if satisfied_points[i]]
-# rospy.loginfo(f"Filtered: {[(det.name, pose) for det, pose in detections]}")
-# print(len(detections))
-# return detections, img_msg
-
-
-# Perform detection and, if a polygon is given, keep only detections whose estimated
-# map-frame position lies inside it.
-def perform_detection(yolo, tf, bridge, shapely, pcl_msg, polygon, filter, model="yolov8n-seg.pt"):
- cv_im = pcl_msg_to_cv2(pcl_msg)
- img_msg = bridge.cv2_to_imgmsg(cv_im)
- detections = yolo(img_msg, model, 0.5, 0.3)
- # rospy.loginfo(detections)
- detections = [(det, estimate_pose(tf, pcl_msg, det)) for det in detections.detected_objects if
- det.name in filter]
-
- # rospy.loginfo(f"All: {[(det.name, pose) for det, pose in detections]}")
- # rospy.loginfo(f"Boundary: {polygon}")
- if polygon is None:
- return detections, img_msg
- else:
- satisfied_points = shapely.are_points_in_polygon_2d(polygon, [[pose[0], pose[1]] for (_, pose) in
- detections]).inside
- detections = [detections[i] for i in range(0, len(detections)) if satisfied_points[i]]
- # rospy.loginfo(f"Filtered: {[(det.name, pose) for det, pose in detections]}")
- print("number of detections inside polygon:", len(detections))
- return detections, img_msg
-
-
-def debug(image, resp):
- rospy.loginfo("Received image message")
- try:
- # pick the first detection as an example
- if len(resp) > 0:
- detection = resp[0][0]
-
- if len(detection.xyseg) > 0:
- # unflatten the array
- contours = np.array(detection.xyseg).reshape(-1, 2)
-
- # draw using opencv
- img = np.zeros((image.height, image.width), dtype=np.uint8)
- cv2.fillPoly(img, pts = [contours], color = (255,255,255))
-
- # send to topic
- # img_msg = bridge.cv2_to_imgmsg(img, encoding="passthrough")
- # debug_publisher1.publish(img_msg)
- debug_publisher1.publish(image)
- else:
- print('WARN: No segmentation was performed on the image!')
- else:
- print('WARN: no detections')
-
- # draw all of them
- if len(resp) > 0:
- img = np.zeros((image.height, image.width), dtype=np.uint8)
- for detection in resp:
- detection = detection[0]
- if len(detection.xyseg) > 0:
- contours = np.array(detection.xyseg).reshape(-1, 2)
- r,g,b = np.random.randint(0, 255, size=3)
- cv2.fillPoly(img, pts = [contours], color = (int(r), int(g), int(b)))
- img_msg = bridge.cv2_to_imgmsg(img, encoding="passthrough")
- debug_publisher2.publish(img_msg)
- else:
- print('WARN: no detections')
-
- except rospy.ServiceException as e:
- rospy.logerr("Service call failed: %s" % e)
-
-def is_anyone_in_front_of_me(yolo,tf, bridge, shapely, pcl_msg, polygon, filter, model="yolov8n-seg.pt"):
- pcl_msg = rospy.wait_for_message("/xtion/depth_registered/points", PointCloud2)
- polygon = rospy.get_param('lift_position_points')
- print(polygon)
- detections, im = perform_detection(yolo, tf, bridge, shapely, pcl_msg, polygon, filter, model="yolov8n-seg.pt")
- return len(detections) > 0
-
-DIST_THRESH = 0.1
-
-def euclidean_distance(point1, point2):
- return np.sqrt((point1[0] - point2[0]) ** 2 + (point1[1] - point2[1]) ** 2 + (point1[2] - point2[2]) ** 2)
-# def match_detections_between_frames(det1, det2):
-# matches = {}
-#
-# for i, position1 in enumerate(det1):
-# closest_distance = float('inf')
-# closest_position = None
-#
-# for j, position2 in enumerate(det2):
-# distance = euclidean_distance(position1, position2)
-# if distance < closest_distance:
-# closest_distance = distance
-# closest_position = position2
-#
-# if closest_position is not None:
-# matches[i] = closest_position
-#
-# robot_position = [robot_x, robot_y, robot_z]
-#
-# for i, position2 in matches.values():
-# vector = np.array(position2) - np.array(det1[i])
-#
-# vector_to_robot = np.array(robot_position) - np.array(det1[i])
-# dot_product = np.dot(vector, vector_to_robot)
-#
-# if dot_product > 0:
-# print(f"Position {i + 1} in frame 1 faces the robot.")
-# else:
-# print(f"Position {i + 1} in frame 1 does not face the robot.")
-#
-# static_pos = []
-# moving_pos = []
-#
-# for i, position2 in matches.items():
-# initial_position = det1[i]
-# final_position = position2
-#
-# initial_distance_to_robot = euclidean_distance(initial_position, robot_position)
-# final_distance_to_robot = euclidean_distance(final_position, robot_position)
-#
-# if final_distance_to_robot < initial_distance_to_robot:
-# # moved closer
-# moving_pos.append(i)
-# elif final_distance_to_robot > initial_distance_to_robot:
-# #mode further
-# pass
-# else:
-# # remained the same
-# static_pos.append(i)
-#
-# # face the quat
-# return static_pos, moving_pos
-
-
-def phase1(yolo, tf, bridge, shapely, pcl_msg, polygon, filter, model):
- # get two frames, a few seconds apart
- pcl_msg = rospy.wait_for_message("/xtion/depth_registered/points", PointCloud2)
- polygon = rospy.get_param('test_lift_points')
- detections1, im = perform_detection(yolo, tf, bridge, shapely, pcl_msg, polygon, filter, model)
- rospy.sleep(5)
- pcl_msg = rospy.wait_for_message("/xtion/depth_registered/points", PointCloud2)
- detections2, im = perform_detection(yolo, tf, bridge, shapely, pcl_msg, polygon, filter, model)
- while len(detections1) != len(detections2):
- detections1, im = perform_detection(yolo, tf, bridge, shapely, pcl_msg, polygon, filter, model)
- rospy.sleep(5)
- pcl_msg = rospy.wait_for_message("/xtion/depth_registered/points", PointCloud2)
- detections2, im = perform_detection(yolo, tf, bridge, shapely, pcl_msg, polygon, filter, model)
-
- # match the detections based on euclidean distance
- matching = {}
- not_matching = []
- for i, det1 in enumerate(detections1):
- for j, det2 in enumerate(detections2):
- if np.linalg.norm(det1[1] - det2[1]) < DIST_THRESH:
- matching[i] = j
- else:
- not_matching.append(j)
-
- # print(not_matching)
-
-
-if __name__ == '__main__':
- rospy.init_node("objects_detection_yolov8", anonymous=True)
-
- # object detection yolo v8
- # objects = detect_objects(["person", "mug", "phone"])
- # for o in objects:
- # print("object name: ", o.name)
- # print("object confidence: ", o.confidence)
- # print("object position: ", o.xywh)
- debug_publisher1 = rospy.Publisher('/yolov8/debug_mask', Image, queue_size=1)
- debug_publisher2 = rospy.Publisher('/yolov8/debug_mask_all', Image, queue_size=1)
-
- # perform detection and segmentation
- rospy.wait_for_service('/yolov8/detect')
- yolo = rospy.ServiceProxy('/yolov8/detect', YoloDetection)
- rospy.wait_for_service("/tf_transform")
- tf = rospy.ServiceProxy("/tf_transform", TfTransform)
- shapely = LasrShapely()
- rospy.loginfo("Got shapely")
- bridge = CvBridge()
- rospy.loginfo("CV Bridge")
-
- # yolo seg only
- # print("now calling perform detection")
- # pcl_msg = rospy.wait_for_message("/xtion/depth_registered/points", PointCloud2)
- # polygon = rospy.get_param('test_lift_points')
- # detections, im = perform_detection(yolo, tf, bridge, shapely, pcl_msg, polygon, ["person"], "yolov8n-seg.pt")
- # debug(im, detections)
- # print("now printing detections")
- # print(len(detections))
- # pos_people = []
- # for i, person in detections:
- # print(person)
- # pos_people.append([person[0], person[1]])
- #
- # print(person[0], person[1])
- #
- # print(pos_people)
- # yolo seg only
-
- pcl_msg = rospy.wait_for_message("/xtion/depth_registered/points", PointCloud2)
- polygon = rospy.get_param('test_lift_points')
- detections, im = perform_detection(yolo, tf, bridge, shapely, pcl_msg, polygon, ["person"], "yolov8n-seg.pt")
-
- pos_people = []
- for det, pose in detections:
- pose = pose.tolist()
- pos_people.append([pose[0], pose[1]])
-
- num_people = len(detections)
-
- rospy.set_param("/lift/num_people", num_people)
- rospy.set_param("/lift/pos_persons", pos_people)
-
- pcl_msg = rospy.wait_for_message("/xtion/depth_registered/points", PointCloud2)
- polygon = rospy.get_param('test_lift_points')
- # print("is anyone in front of me >")
- # print(is_anyone_in_front_of_me(yolo, tf, bridge, shapely, pcl_msg, polygon, ["person"], "yolov8n-seg.pt"))
- # print("is anyone in front of me")
-
- # detections[i], then the second element of the tuple (the estimated pose)
- # print(detections[0][1])
- rospy.spin()
-
diff --git a/legacy/lasr_object_detection_yolo/src/lasr_object_detection_yolo/yolo_client.py b/legacy/lasr_object_detection_yolo/src/lasr_object_detection_yolo/yolo_client.py
deleted file mode 100755
index 9a6161588..000000000
--- a/legacy/lasr_object_detection_yolo/src/lasr_object_detection_yolo/yolo_client.py
+++ /dev/null
@@ -1,28 +0,0 @@
-#!/usr/bin/env python3
-
-import rospy
-
-from lasr_vision_msgs.srv import YoloDetection, YoloDetectionRequest
-from sensor_msgs.msg import Image
-from cv_bridge3 import CvBridge
-import cv2
-
-if __name__ == '__main__':
- rospy.init_node("yolo_client", anonymous=True)
- bridge = CvBridge()
- while not rospy.is_shutdown():
- rospy.wait_for_service('yolo_object_detection_server/detect_objects')
- im = rospy.wait_for_message('/usb_cam/image_raw', Image)
- image = bridge.imgmsg_to_cv2(im, desired_encoding='passthrough')
- cv2.imwrite('hello.jpg', image)
- req = YoloDetectionRequest()
- req.image_raw = im
- req.dataset = 'coco'
- req.confidence = 0.5
- req.nms = 0.3
- print('hello')
- server = rospy.ServiceProxy('yolo_object_detection_server/detect_objects', YoloDetection)
- detections = server(req).detected_objects
- print(
- f"DETECTED:{detections}\n"
- )
\ No newline at end of file
diff --git a/legacy/lasr_object_detection_yolo/src/lasr_object_detection_yolo/yolo_object_detection_server.py b/legacy/lasr_object_detection_yolo/src/lasr_object_detection_yolo/yolo_object_detection_server.py
deleted file mode 100755
index 648e6b35b..000000000
--- a/legacy/lasr_object_detection_yolo/src/lasr_object_detection_yolo/yolo_object_detection_server.py
+++ /dev/null
@@ -1,184 +0,0 @@
-#!/usr/bin/env python3
-import rospy
-import rospkg
-
-import torch
-import torchvision.transforms as transforms
-
-import cv2
-from PIL import Image as PIL_Image
-import numpy as np
-
-import os
-import time
-
-from darknet_pytorch.darknet import Darknet
-from darknet_pytorch.utils import post_processing
-
-from lasr_vision_msgs.srv import YoloDetection, YoloDetectionResponse
-from lasr_vision_msgs.msg import Detection
-
-import nvidia_smi
-
-
-MODEL_ROOT = os.path.join(rospkg.RosPack().get_path('lasr_object_detection_yolo'), 'models')
-
-
-def transform():
- return transforms.Compose([
- transforms.Resize((416, 416)),
- transforms.ToTensor(),
- ])
-
-class YoloObjectDetectionServer():
-
- def __init__(self):
-
- self.model_name = None
- self.yolov4 = None
- self.labels = []
- self.device = 'cpu'
-
- def load_model(self, model_name):
- model_path = os.path.join(MODEL_ROOT, model_name)
-
- if os.path.isdir(model_path):
- self.model_name = model_name
-
- start_time = time.time()
-
- try:
-
- with open(os.path.join(model_path, 'classes.txt')) as fp:
- self.labels = fp.read().strip().splitlines()
-
- except FileNotFoundError:
- rospy.logerr(f"Couldn't load {self.model_name}, 'classes.txt' does not exist in {model_path}.")
- return False
-
- try:
-
- with open(os.path.join(model_path, 'yolov4.cfg')) as fp:
- self.yolov4 = Darknet(os.path.join(model_path, 'yolov4.cfg'))
-
- except FileNotFoundError:
- rospy.logerr(f"Couldn't load {self.model_name}, 'yolov4.cfg' does not exist in {model_path}.")
- return False
-
- try:
- self.yolov4.load_weights(os.path.join(model_path, 'yolov4.weights'))
-
- except FileNotFoundError:
- rospy.logerr(f"Couldn't load {self.model_name}, 'yolov4.weights' does not exist in {model_path}.")
- return False
-
- self.yolov4.eval()
- """
- if torch.cuda.is_available():
-
- # Initialise nvidia-smi
- nvidia_smi.nvmlInit()
-
- # Assume a single GPU.
- handle = nvidia_smi.nvmlDeviceGetHandleByIndex(0)
-
- # Get GPU memory info.
- info = nvidia_smi.nvmlDeviceGetMemoryInfo(handle)
-
- if info.free / 1024**2 > 1024:
- self.device = 'cuda'
-
- # Shutdown nvidia-smi
- nvidia_smi.nvmlShutdown()
- """
- self.device = "cpu"
-
- rospy.loginfo(f"Using device: {self.device}")
- self.yolov4.to(self.device)
-
- rospy.loginfo('Time to load {} model: {:.2f} seconds'.format(model_name, time.time() - start_time))
-
- return True
-
- def detect(self, req):
-
- response = YoloDetectionResponse()
-
- # Only load model if it is not already loaded.
- if self.model_name != req.dataset:
- if not self.load_model(req.dataset):
- raise rospy.ServiceException(f"Couldn't load model '{req.dataset}'")
-
- # Random colours for bounding boxes.
- np.random.seed(42)
- COLOURS = np.random.randint(0, 255, size=(len(self.labels), 3), dtype="uint8")
-
- # Perform pre-processing.
- frame = np.frombuffer(req.image_raw.data, dtype=np.uint8).reshape(req.image_raw.height, req.image_raw.width, -1)
- image = PIL_Image.fromarray(frame)
- image = torch.stack([transform()(image)]).to(self.device)
-
- outputs = self.yolov4(image)
-
- # network forward pass and non-maximum suppression
- #try:
- #except RuntimeError:
- # if self.device != 'cpu':
- # self.device = 'cpu'
- #self.yolov4.to(self.device)
- # return self.detect(req)
- # else:
- # raise rospy.ServiceException("Couldn't use CUDA or CPU....")
- outputs = post_processing(image, req.confidence, req.nms, outputs)
-
- if outputs[0] is not None:
- for detection in outputs[0]:
-
- # Class ID of detection.
- idx = np.argmax(detection[5:])
-
- # Convert bounding box to image co-ordinates.
- bbox = detection[:4]
- bbox[0] *= req.image_raw.width
- bbox[1] *= req.image_raw.height
- bbox[2] *= req.image_raw.width
- bbox[3] *= req.image_raw.height
- x1,y1,x2,y2 = [int(i) for i in bbox]
-
-
- obj_conf, class_score = detection[4:6]
- class_pred = int(detection[6])
-
- # Draw and label bounding box of object in the frame.
- name = self.labels[class_pred]
- confidence = class_score
- x, y, w, h = x1, y1, x2 - x1, y2 - y1
- color = [int(c) for c in COLOURS[idx]]
- cv2.rectangle(frame, (x, y), (x + w, y + h), color, 2)
- text = "{}: {:.4f}".format(name, confidence)
- cv2.putText(frame, text, (x, y - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)
-
- # Check detection is above the confidence threshold.
- if class_score > req.confidence:
- xywh = [x1, y1, x2 - x1, y2 - y1]
-
- # Append detection.
- response.detected_objects.append(
- Detection(
- name=name,
- confidence=class_score,
- xywh=xywh
- )
- )
- rospy.loginfo(f"Detected objects: {response.detected_objects}")
- return response
-
-if __name__ == '__main__':
- rospy.init_node('yolo_object_detection_server')
- server = YoloObjectDetectionServer()
- serv = rospy.Service('yolo_object_detection_server/detect_objects', YoloDetection, server.detect)
- rospy.loginfo('YOLOv4 object detection service initialised')
- rospy.spin()
-
-
-
diff --git a/legacy/lasr_object_detection_yolo/srv/YoloDetection.srv b/legacy/lasr_object_detection_yolo/srv/YoloDetection.srv
deleted file mode 100644
index f23dbf29f..000000000
--- a/legacy/lasr_object_detection_yolo/srv/YoloDetection.srv
+++ /dev/null
@@ -1,6 +0,0 @@
-sensor_msgs/Image image_raw
-string dataset
-float32 confidence
-float32 nms
----
-lasr_vision_msgs/Detection[] detected_objects
\ No newline at end of file
diff --git a/legacy/lasr_object_detection_yolo/train/.template/yolov4-tiny.cfg b/legacy/lasr_object_detection_yolo/train/.template/yolov4-tiny.cfg
deleted file mode 100644
index dc6f5bfb8..000000000
--- a/legacy/lasr_object_detection_yolo/train/.template/yolov4-tiny.cfg
+++ /dev/null
@@ -1,281 +0,0 @@
-[net]
-# Testing
-#batch=1
-#subdivisions=1
-# Training
-batch=64
-subdivisions=1
-width=416
-height=416
-channels=3
-momentum=0.9
-decay=0.0005
-angle=0
-saturation = 1.5
-exposure = 1.5
-hue=.1
-
-learning_rate=0.00261
-burn_in=1000
-max_batches = 500200
-policy=steps
-steps=400000,450000
-scales=.1,.1
-
-[convolutional]
-batch_normalize=1
-filters=32
-size=3
-stride=2
-pad=1
-activation=leaky
-
-[convolutional]
-batch_normalize=1
-filters=64
-size=3
-stride=2
-pad=1
-activation=leaky
-
-[convolutional]
-batch_normalize=1
-filters=64
-size=3
-stride=1
-pad=1
-activation=leaky
-
-[route]
-layers=-1
-groups=2
-group_id=1
-
-[convolutional]
-batch_normalize=1
-filters=32
-size=3
-stride=1
-pad=1
-activation=leaky
-
-[convolutional]
-batch_normalize=1
-filters=32
-size=3
-stride=1
-pad=1
-activation=leaky
-
-[route]
-layers = -1,-2
-
-[convolutional]
-batch_normalize=1
-filters=64
-size=1
-stride=1
-pad=1
-activation=leaky
-
-[route]
-layers = -6,-1
-
-[maxpool]
-size=2
-stride=2
-
-[convolutional]
-batch_normalize=1
-filters=128
-size=3
-stride=1
-pad=1
-activation=leaky
-
-[route]
-layers=-1
-groups=2
-group_id=1
-
-[convolutional]
-batch_normalize=1
-filters=64
-size=3
-stride=1
-pad=1
-activation=leaky
-
-[convolutional]
-batch_normalize=1
-filters=64
-size=3
-stride=1
-pad=1
-activation=leaky
-
-[route]
-layers = -1,-2
-
-[convolutional]
-batch_normalize=1
-filters=128
-size=1
-stride=1
-pad=1
-activation=leaky
-
-[route]
-layers = -6,-1
-
-[maxpool]
-size=2
-stride=2
-
-[convolutional]
-batch_normalize=1
-filters=256
-size=3
-stride=1
-pad=1
-activation=leaky
-
-[route]
-layers=-1
-groups=2
-group_id=1
-
-[convolutional]
-batch_normalize=1
-filters=128
-size=3
-stride=1
-pad=1
-activation=leaky
-
-[convolutional]
-batch_normalize=1
-filters=128
-size=3
-stride=1
-pad=1
-activation=leaky
-
-[route]
-layers = -1,-2
-
-[convolutional]
-batch_normalize=1
-filters=256
-size=1
-stride=1
-pad=1
-activation=leaky
-
-[route]
-layers = -6,-1
-
-[maxpool]
-size=2
-stride=2
-
-[convolutional]
-batch_normalize=1
-filters=512
-size=3
-stride=1
-pad=1
-activation=leaky
-
-##################################
-
-[convolutional]
-batch_normalize=1
-filters=256
-size=1
-stride=1
-pad=1
-activation=leaky
-
-[convolutional]
-batch_normalize=1
-filters=512
-size=3
-stride=1
-pad=1
-activation=leaky
-
-[convolutional]
-size=1
-stride=1
-pad=1
-filters=255
-activation=linear
-
-
-
-[yolo]
-mask = 3,4,5
-anchors = 10,14, 23,27, 37,58, 81,82, 135,169, 344,319
-classes=80
-num=6
-jitter=.3
-scale_x_y = 1.05
-cls_normalizer=1.0
-iou_normalizer=0.07
-iou_loss=ciou
-ignore_thresh = .7
-truth_thresh = 1
-random=0
-resize=1.5
-nms_kind=greedynms
-beta_nms=0.6
-
-[route]
-layers = -4
-
-[convolutional]
-batch_normalize=1
-filters=128
-size=1
-stride=1
-pad=1
-activation=leaky
-
-[upsample]
-stride=2
-
-[route]
-layers = -1, 23
-
-[convolutional]
-batch_normalize=1
-filters=256
-size=3
-stride=1
-pad=1
-activation=leaky
-
-[convolutional]
-size=1
-stride=1
-pad=1
-filters=255
-activation=linear
-
-[yolo]
-mask = 1,2,3
-anchors = 10,14, 23,27, 37,58, 81,82, 135,169, 344,319
-classes=80
-num=6
-jitter=.3
-scale_x_y = 1.05
-cls_normalizer=1.0
-iou_normalizer=0.07
-iou_loss=ciou
-ignore_thresh = .7
-truth_thresh = 1
-random=0
-resize=1.5
-nms_kind=greedynms
-beta_nms=0.6
diff --git a/legacy/lasr_object_detection_yolo/train/.template/yolov4.cfg b/legacy/lasr_object_detection_yolo/train/.template/yolov4.cfg
deleted file mode 100644
index f43259bf7..000000000
--- a/legacy/lasr_object_detection_yolo/train/.template/yolov4.cfg
+++ /dev/null
@@ -1,1157 +0,0 @@
-[net]
-batch=64
-subdivisions=8
-# Training
-#width=512
-#height=512
-width=608
-height=608
-channels=3
-momentum=0.949
-decay=0.0005
-angle=0
-saturation = 1.5
-exposure = 1.5
-hue=.1
-
-learning_rate=0.0013
-burn_in=1000
-max_batches = 500500
-policy=steps
-steps=400000,450000
-scales=.1,.1
-
-#cutmix=1
-mosaic=0
-
-#:104x104 54:52x52 85:26x26 104:13x13 for 416
-
-[convolutional]
-batch_normalize=1
-filters=32
-size=3
-stride=1
-pad=1
-activation=mish
-
-# Downsample
-
-[convolutional]
-batch_normalize=1
-filters=64
-size=3
-stride=2
-pad=1
-activation=mish
-
-[convolutional]
-batch_normalize=1
-filters=64
-size=1
-stride=1
-pad=1
-activation=mish
-
-[route]
-layers = -2
-
-[convolutional]
-batch_normalize=1
-filters=64
-size=1
-stride=1
-pad=1
-activation=mish
-
-[convolutional]
-batch_normalize=1
-filters=32
-size=1
-stride=1
-pad=1
-activation=mish
-
-[convolutional]
-batch_normalize=1
-filters=64
-size=3
-stride=1
-pad=1
-activation=mish
-
-[shortcut]
-from=-3
-activation=linear
-
-[convolutional]
-batch_normalize=1
-filters=64
-size=1
-stride=1
-pad=1
-activation=mish
-
-[route]
-layers = -1,-7
-
-[convolutional]
-batch_normalize=1
-filters=64
-size=1
-stride=1
-pad=1
-activation=mish
-
-# Downsample
-
-[convolutional]
-batch_normalize=1
-filters=128
-size=3
-stride=2
-pad=1
-activation=mish
-
-[convolutional]
-batch_normalize=1
-filters=64
-size=1
-stride=1
-pad=1
-activation=mish
-
-[route]
-layers = -2
-
-[convolutional]
-batch_normalize=1
-filters=64
-size=1
-stride=1
-pad=1
-activation=mish
-
-[convolutional]
-batch_normalize=1
-filters=64
-size=1
-stride=1
-pad=1
-activation=mish
-
-[convolutional]
-batch_normalize=1
-filters=64
-size=3
-stride=1
-pad=1
-activation=mish
-
-[shortcut]
-from=-3
-activation=linear
-
-[convolutional]
-batch_normalize=1
-filters=64
-size=1
-stride=1
-pad=1
-activation=mish
-
-[convolutional]
-batch_normalize=1
-filters=64
-size=3
-stride=1
-pad=1
-activation=mish
-
-[shortcut]
-from=-3
-activation=linear
-
-[convolutional]
-batch_normalize=1
-filters=64
-size=1
-stride=1
-pad=1
-activation=mish
-
-[route]
-layers = -1,-10
-
-[convolutional]
-batch_normalize=1
-filters=128
-size=1
-stride=1
-pad=1
-activation=mish
-
-# Downsample
-
-[convolutional]
-batch_normalize=1
-filters=256
-size=3
-stride=2
-pad=1
-activation=mish
-
-[convolutional]
-batch_normalize=1
-filters=128
-size=1
-stride=1
-pad=1
-activation=mish
-
-[route]
-layers = -2
-
-[convolutional]
-batch_normalize=1
-filters=128
-size=1
-stride=1
-pad=1
-activation=mish
-
-[convolutional]
-batch_normalize=1
-filters=128
-size=1
-stride=1
-pad=1
-activation=mish
-
-[convolutional]
-batch_normalize=1
-filters=128
-size=3
-stride=1
-pad=1
-activation=mish
-
-[shortcut]
-from=-3
-activation=linear
-
-[convolutional]
-batch_normalize=1
-filters=128
-size=1
-stride=1
-pad=1
-activation=mish
-
-[convolutional]
-batch_normalize=1
-filters=128
-size=3
-stride=1
-pad=1
-activation=mish
-
-[shortcut]
-from=-3
-activation=linear
-
-[convolutional]
-batch_normalize=1
-filters=128
-size=1
-stride=1
-pad=1
-activation=mish
-
-[convolutional]
-batch_normalize=1
-filters=128
-size=3
-stride=1
-pad=1
-activation=mish
-
-[shortcut]
-from=-3
-activation=linear
-
-[convolutional]
-batch_normalize=1
-filters=128
-size=1
-stride=1
-pad=1
-activation=mish
-
-[convolutional]
-batch_normalize=1
-filters=128
-size=3
-stride=1
-pad=1
-activation=mish
-
-[shortcut]
-from=-3
-activation=linear
-
-
-[convolutional]
-batch_normalize=1
-filters=128
-size=1
-stride=1
-pad=1
-activation=mish
-
-[convolutional]
-batch_normalize=1
-filters=128
-size=3
-stride=1
-pad=1
-activation=mish
-
-[shortcut]
-from=-3
-activation=linear
-
-[convolutional]
-batch_normalize=1
-filters=128
-size=1
-stride=1
-pad=1
-activation=mish
-
-[convolutional]
-batch_normalize=1
-filters=128
-size=3
-stride=1
-pad=1
-activation=mish
-
-[shortcut]
-from=-3
-activation=linear
-
-[convolutional]
-batch_normalize=1
-filters=128
-size=1
-stride=1
-pad=1
-activation=mish
-
-[convolutional]
-batch_normalize=1
-filters=128
-size=3
-stride=1
-pad=1
-activation=mish
-
-[shortcut]
-from=-3
-activation=linear
-
-[convolutional]
-batch_normalize=1
-filters=128
-size=1
-stride=1
-pad=1
-activation=mish
-
-[convolutional]
-batch_normalize=1
-filters=128
-size=3
-stride=1
-pad=1
-activation=mish
-
-[shortcut]
-from=-3
-activation=linear
-
-[convolutional]
-batch_normalize=1
-filters=128
-size=1
-stride=1
-pad=1
-activation=mish
-
-[route]
-layers = -1,-28
-
-[convolutional]
-batch_normalize=1
-filters=256
-size=1
-stride=1
-pad=1
-activation=mish
-
-# Downsample
-
-[convolutional]
-batch_normalize=1
-filters=512
-size=3
-stride=2
-pad=1
-activation=mish
-
-[convolutional]
-batch_normalize=1
-filters=256
-size=1
-stride=1
-pad=1
-activation=mish
-
-[route]
-layers = -2
-
-[convolutional]
-batch_normalize=1
-filters=256
-size=1
-stride=1
-pad=1
-activation=mish
-
-[convolutional]
-batch_normalize=1
-filters=256
-size=1
-stride=1
-pad=1
-activation=mish
-
-[convolutional]
-batch_normalize=1
-filters=256
-size=3
-stride=1
-pad=1
-activation=mish
-
-[shortcut]
-from=-3
-activation=linear
-
-
-[convolutional]
-batch_normalize=1
-filters=256
-size=1
-stride=1
-pad=1
-activation=mish
-
-[convolutional]
-batch_normalize=1
-filters=256
-size=3
-stride=1
-pad=1
-activation=mish
-
-[shortcut]
-from=-3
-activation=linear
-
-
-[convolutional]
-batch_normalize=1
-filters=256
-size=1
-stride=1
-pad=1
-activation=mish
-
-[convolutional]
-batch_normalize=1
-filters=256
-size=3
-stride=1
-pad=1
-activation=mish
-
-[shortcut]
-from=-3
-activation=linear
-
-
-[convolutional]
-batch_normalize=1
-filters=256
-size=1
-stride=1
-pad=1
-activation=mish
-
-[convolutional]
-batch_normalize=1
-filters=256
-size=3
-stride=1
-pad=1
-activation=mish
-
-[shortcut]
-from=-3
-activation=linear
-
-
-[convolutional]
-batch_normalize=1
-filters=256
-size=1
-stride=1
-pad=1
-activation=mish
-
-[convolutional]
-batch_normalize=1
-filters=256
-size=3
-stride=1
-pad=1
-activation=mish
-
-[shortcut]
-from=-3
-activation=linear
-
-
-[convolutional]
-batch_normalize=1
-filters=256
-size=1
-stride=1
-pad=1
-activation=mish
-
-[convolutional]
-batch_normalize=1
-filters=256
-size=3
-stride=1
-pad=1
-activation=mish
-
-[shortcut]
-from=-3
-activation=linear
-
-
-[convolutional]
-batch_normalize=1
-filters=256
-size=1
-stride=1
-pad=1
-activation=mish
-
-[convolutional]
-batch_normalize=1
-filters=256
-size=3
-stride=1
-pad=1
-activation=mish
-
-[shortcut]
-from=-3
-activation=linear
-
-[convolutional]
-batch_normalize=1
-filters=256
-size=1
-stride=1
-pad=1
-activation=mish
-
-[convolutional]
-batch_normalize=1
-filters=256
-size=3
-stride=1
-pad=1
-activation=mish
-
-[shortcut]
-from=-3
-activation=linear
-
-[convolutional]
-batch_normalize=1
-filters=256
-size=1
-stride=1
-pad=1
-activation=mish
-
-[route]
-layers = -1,-28
-
-[convolutional]
-batch_normalize=1
-filters=512
-size=1
-stride=1
-pad=1
-activation=mish
-
-# Downsample
-
-[convolutional]
-batch_normalize=1
-filters=1024
-size=3
-stride=2
-pad=1
-activation=mish
-
-[convolutional]
-batch_normalize=1
-filters=512
-size=1
-stride=1
-pad=1
-activation=mish
-
-[route]
-layers = -2
-
-[convolutional]
-batch_normalize=1
-filters=512
-size=1
-stride=1
-pad=1
-activation=mish
-
-[convolutional]
-batch_normalize=1
-filters=512
-size=1
-stride=1
-pad=1
-activation=mish
-
-[convolutional]
-batch_normalize=1
-filters=512
-size=3
-stride=1
-pad=1
-activation=mish
-
-[shortcut]
-from=-3
-activation=linear
-
-[convolutional]
-batch_normalize=1
-filters=512
-size=1
-stride=1
-pad=1
-activation=mish
-
-[convolutional]
-batch_normalize=1
-filters=512
-size=3
-stride=1
-pad=1
-activation=mish
-
-[shortcut]
-from=-3
-activation=linear
-
-[convolutional]
-batch_normalize=1
-filters=512
-size=1
-stride=1
-pad=1
-activation=mish
-
-[convolutional]
-batch_normalize=1
-filters=512
-size=3
-stride=1
-pad=1
-activation=mish
-
-[shortcut]
-from=-3
-activation=linear
-
-[convolutional]
-batch_normalize=1
-filters=512
-size=1
-stride=1
-pad=1
-activation=mish
-
-[convolutional]
-batch_normalize=1
-filters=512
-size=3
-stride=1
-pad=1
-activation=mish
-
-[shortcut]
-from=-3
-activation=linear
-
-[convolutional]
-batch_normalize=1
-filters=512
-size=1
-stride=1
-pad=1
-activation=mish
-
-[route]
-layers = -1,-16
-
-[convolutional]
-batch_normalize=1
-filters=1024
-size=1
-stride=1
-pad=1
-activation=mish
-
-##########################
-
-[convolutional]
-batch_normalize=1
-filters=512
-size=1
-stride=1
-pad=1
-activation=leaky
-
-[convolutional]
-batch_normalize=1
-size=3
-stride=1
-pad=1
-filters=1024
-activation=leaky
-
-[convolutional]
-batch_normalize=1
-filters=512
-size=1
-stride=1
-pad=1
-activation=leaky
-
-### SPP ###
-[maxpool]
-stride=1
-size=5
-
-[route]
-layers=-2
-
-[maxpool]
-stride=1
-size=9
-
-[route]
-layers=-4
-
-[maxpool]
-stride=1
-size=13
-
-[route]
-layers=-1,-3,-5,-6
-### End SPP ###
-
-[convolutional]
-batch_normalize=1
-filters=512
-size=1
-stride=1
-pad=1
-activation=leaky
-
-[convolutional]
-batch_normalize=1
-size=3
-stride=1
-pad=1
-filters=1024
-activation=leaky
-
-[convolutional]
-batch_normalize=1
-filters=512
-size=1
-stride=1
-pad=1
-activation=leaky
-
-[convolutional]
-batch_normalize=1
-filters=256
-size=1
-stride=1
-pad=1
-activation=leaky
-
-[upsample]
-stride=2
-
-[route]
-layers = 85
-
-[convolutional]
-batch_normalize=1
-filters=256
-size=1
-stride=1
-pad=1
-activation=leaky
-
-[route]
-layers = -1, -3
-
-[convolutional]
-batch_normalize=1
-filters=256
-size=1
-stride=1
-pad=1
-activation=leaky
-
-[convolutional]
-batch_normalize=1
-size=3
-stride=1
-pad=1
-filters=512
-activation=leaky
-
-[convolutional]
-batch_normalize=1
-filters=256
-size=1
-stride=1
-pad=1
-activation=leaky
-
-[convolutional]
-batch_normalize=1
-size=3
-stride=1
-pad=1
-filters=512
-activation=leaky
-
-[convolutional]
-batch_normalize=1
-filters=256
-size=1
-stride=1
-pad=1
-activation=leaky
-
-[convolutional]
-batch_normalize=1
-filters=128
-size=1
-stride=1
-pad=1
-activation=leaky
-
-[upsample]
-stride=2
-
-[route]
-layers = 54
-
-[convolutional]
-batch_normalize=1
-filters=128
-size=1
-stride=1
-pad=1
-activation=leaky
-
-[route]
-layers = -1, -3
-
-[convolutional]
-batch_normalize=1
-filters=128
-size=1
-stride=1
-pad=1
-activation=leaky
-
-[convolutional]
-batch_normalize=1
-size=3
-stride=1
-pad=1
-filters=256
-activation=leaky
-
-[convolutional]
-batch_normalize=1
-filters=128
-size=1
-stride=1
-pad=1
-activation=leaky
-
-[convolutional]
-batch_normalize=1
-size=3
-stride=1
-pad=1
-filters=256
-activation=leaky
-
-[convolutional]
-batch_normalize=1
-filters=128
-size=1
-stride=1
-pad=1
-activation=leaky
-
-##########################
-
-[convolutional]
-batch_normalize=1
-size=3
-stride=1
-pad=1
-filters=256
-activation=leaky
-
-[convolutional]
-size=1
-stride=1
-pad=1
-filters=255
-activation=linear
-
-
-[yolo]
-mask = 0,1,2
-anchors = 12, 16, 19, 36, 40, 28, 36, 75, 76, 55, 72, 146, 142, 110, 192, 243, 459, 401
-classes=80
-num=9
-jitter=.3
-ignore_thresh = .7
-truth_thresh = 1
-scale_x_y = 1.2
-iou_thresh=0.213
-cls_normalizer=1.0
-iou_normalizer=0.07
-iou_loss=ciou
-nms_kind=greedynms
-beta_nms=0.6
-max_delta=5
-
-
-[route]
-layers = -4
-
-[convolutional]
-batch_normalize=1
-size=3
-stride=2
-pad=1
-filters=256
-activation=leaky
-
-[route]
-layers = -1, -16
-
-[convolutional]
-batch_normalize=1
-filters=256
-size=1
-stride=1
-pad=1
-activation=leaky
-
-[convolutional]
-batch_normalize=1
-size=3
-stride=1
-pad=1
-filters=512
-activation=leaky
-
-[convolutional]
-batch_normalize=1
-filters=256
-size=1
-stride=1
-pad=1
-activation=leaky
-
-[convolutional]
-batch_normalize=1
-size=3
-stride=1
-pad=1
-filters=512
-activation=leaky
-
-[convolutional]
-batch_normalize=1
-filters=256
-size=1
-stride=1
-pad=1
-activation=leaky
-
-[convolutional]
-batch_normalize=1
-size=3
-stride=1
-pad=1
-filters=512
-activation=leaky
-
-[convolutional]
-size=1
-stride=1
-pad=1
-filters=255
-activation=linear
-
-
-[yolo]
-mask = 3,4,5
-anchors = 12, 16, 19, 36, 40, 28, 36, 75, 76, 55, 72, 146, 142, 110, 192, 243, 459, 401
-classes=80
-num=9
-jitter=.3
-ignore_thresh = .7
-truth_thresh = 1
-scale_x_y = 1.1
-iou_thresh=0.213
-cls_normalizer=1.0
-iou_normalizer=0.07
-iou_loss=ciou
-nms_kind=greedynms
-beta_nms=0.6
-max_delta=5
-
-
-[route]
-layers = -4
-
-[convolutional]
-batch_normalize=1
-size=3
-stride=2
-pad=1
-filters=512
-activation=leaky
-
-[route]
-layers = -1, -37
-
-[convolutional]
-batch_normalize=1
-filters=512
-size=1
-stride=1
-pad=1
-activation=leaky
-
-[convolutional]
-batch_normalize=1
-size=3
-stride=1
-pad=1
-filters=1024
-activation=leaky
-
-[convolutional]
-batch_normalize=1
-filters=512
-size=1
-stride=1
-pad=1
-activation=leaky
-
-[convolutional]
-batch_normalize=1
-size=3
-stride=1
-pad=1
-filters=1024
-activation=leaky
-
-[convolutional]
-batch_normalize=1
-filters=512
-size=1
-stride=1
-pad=1
-activation=leaky
-
-[convolutional]
-batch_normalize=1
-size=3
-stride=1
-pad=1
-filters=1024
-activation=leaky
-
-[convolutional]
-size=1
-stride=1
-pad=1
-filters=255
-activation=linear
-
-
-[yolo]
-mask = 6,7,8
-anchors = 12, 16, 19, 36, 40, 28, 36, 75, 76, 55, 72, 146, 142, 110, 192, 243, 459, 401
-classes=80
-num=9
-jitter=.3
-ignore_thresh = .7
-truth_thresh = 1
-random=1
-scale_x_y = 1.05
-iou_thresh=0.213
-cls_normalizer=1.0
-iou_normalizer=0.07
-iou_loss=ciou
-nms_kind=greedynms
-beta_nms=0.6
-max_delta=5
\ No newline at end of file
diff --git a/legacy/lasr_object_detection_yolo/train/Dockerfile b/legacy/lasr_object_detection_yolo/train/Dockerfile
deleted file mode 100644
index a53eb4553..000000000
--- a/legacy/lasr_object_detection_yolo/train/Dockerfile
+++ /dev/null
@@ -1,14 +0,0 @@
-FROM python:3 as builder
-
-COPY . .
-ENV PATH $PATH:/scripts
-
-ENV USE_GPU 0
-ENV USE_OPENCV 0
-ENV USE_OPENMP 0
-ENV USE_CUDNN 0
-
-RUN mkdir datasets config weights
-RUN build_darknet.sh
-
-ENTRYPOINT ["train.sh"]
\ No newline at end of file
diff --git a/legacy/lasr_object_detection_yolo/train/README.md b/legacy/lasr_object_detection_yolo/train/README.md
deleted file mode 100644
index bcfca6a79..000000000
--- a/legacy/lasr_object_detection_yolo/train/README.md
+++ /dev/null
@@ -1,25 +0,0 @@
-## Train YOLOv4
-
-For generating darknet config files and training YOLOv4.
-
-### Instructions
-
-`docker build -t darknet .`
-
-Mount your data to /datasets/data:
-
-`docker run -v data:/datasets/data darknet`
-
-To run with GPU / CUDNN / OPENCV / OPENMP:
-
-`docker run -e USE_GPU=1 -v data:/datasets/data darknet`
-
-### Details
-
-**build_darknet.sh** builds darknet from source, modify this to change compilation options.
-
-**train.sh** trains YOLOv4. Usage is `./train.sh DATASET`, where `DATASET` is a directory name inside **datasets** (you must create this root directory yourself).
-
-If you encounter issues with darknet, you may want to alter the **batch** and **subdivisions** values in the generated cfg files.
-
-Run this outside of the ROS environment.
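-
-For reference, a dataset is expected to look roughly like the following (layout inferred from **generate_config_files.py**; darknet-style `.txt` label files are assumed to sit next to each image):
-
-    datasets/
-      my_objects/
-        classes.txt
-        img_0001.jpg
-        img_0001.txt
-        ...
-
-It would then be trained with `./train.sh my_objects`.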
\ No newline at end of file
diff --git a/legacy/lasr_object_detection_yolo/train/scripts/build_darknet.sh b/legacy/lasr_object_detection_yolo/train/scripts/build_darknet.sh
deleted file mode 100755
index 96694a266..000000000
--- a/legacy/lasr_object_detection_yolo/train/scripts/build_darknet.sh
+++ /dev/null
@@ -1,4 +0,0 @@
-#!/bin/bash
-git clone https://github.com/AlexeyAB/darknet
-make -C darknet GPU=$USE_GPU OPENCV=$USE_OPENCV CUDNN=$USE_CUDNN OPENMP=$USE_OPENMP
-wget -P pretrained_weights https://github.com/AlexeyAB/darknet/releases/download/darknet_yolo_v3_optimal/yolov4.conv.137
\ No newline at end of file
diff --git a/legacy/lasr_object_detection_yolo/train/scripts/generate_config_files.py b/legacy/lasr_object_detection_yolo/train/scripts/generate_config_files.py
deleted file mode 100644
index 39304b059..000000000
--- a/legacy/lasr_object_detection_yolo/train/scripts/generate_config_files.py
+++ /dev/null
@@ -1,123 +0,0 @@
-import os
-import sys
-import random
-
-# validate input
-if len(sys.argv) < 2 or len(sys.argv) > 3:
- print('usage: python generate_config_files.py DATASET' + \
- ' [full path to lasr_darknet_config for use on a different system]')
- sys.exit()
-
-# get dataset name
-dataset = sys.argv[1]
-
-# lasr_darknet_config path
-
-lasr_path = os.path.dirname(os.path.realpath(__file__)) if not os.path.exists("/.dockerenv") else ""
-
-# folder paths
-cfg_path = lasr_path + '/config/' + dataset
-img_path = lasr_path + '/datasets/' + dataset
-bkp_path = lasr_path + '/weights/' + dataset
-
-# file paths
-data_path = cfg_path + '/' + dataset + '.data'
-train_path = cfg_path + '/train_' + dataset + '.txt'
-valid_path = cfg_path + '/valid_' + dataset + '.txt'
-names_path = img_path + '/classes.txt'
-yv4_path = cfg_path + '/yolov4.cfg'
-yv4tiny_path = cfg_path + '/yolov4-tiny.cfg'
-yv4tiny_tplt_path = lasr_path + '/.template/yolov4-tiny.cfg'
-yv4_tplt_path = lasr_path + '/.template/yolov4.cfg'
-
-# validate paths
-# write something here!!!!
-
-# create dirs
-if not os.path.exists(cfg_path):
- os.makedirs(cfg_path)
-if not os.path.exists(bkp_path):
- os.makedirs(bkp_path)
-
-# get number of classes
-classes = 0
-with open(names_path) as f:
- for line in f:
- classes += 1
-
-# SET GEN PATHS
-if len(sys.argv) == 3:
- gen_path = sys.argv[2]
-else:
- gen_path = lasr_path
-
-# set gen paths
-cfg_gen_path = gen_path + '/config/' + dataset
-img_gen_path = gen_path + '/datasets/' + dataset
-bkp_gen_path = gen_path + '/weights/' + dataset
-train_gen_path = cfg_gen_path + '/train_' + dataset + '.txt'
-valid_gen_path = cfg_gen_path + '/valid_' + dataset + '.txt'
-names_gen_path = img_gen_path + '/classes.txt'
-
-# --- CREATE TRAIN AND VALID FILES --- #
-train_file = open(train_path, 'w')
-valid_file = open(valid_path, 'w')
-
-# find jpgs in "datasets" folder and add all paths to train and valid lists
-for root, dirs, files in os.walk(img_path):
- for filename in files:
- if filename.endswith('.jpg'):
- text_out = img_gen_path + '/' + filename + '\n'
- # roughly one in ten files is used as a validation image
- if random.randint(0, 9):
- train_file.write(text_out)
- else:
- valid_file.write(text_out)
-
-train_file.close()
-valid_file.close()
-
-
-
-# --- CREATE DATA FILE --- #
-data_file = open(data_path, 'w')
-data_file.write('classes = ' + str(classes) + '\n')
-data_file.write('train = ' + train_gen_path + '\n')
-data_file.write('valid = ' + valid_gen_path + '\n')
-data_file.write('names = ' + names_gen_path + '\n')
-data_file.write('backup = ' + bkp_gen_path + '\n')
-data_file.close()
-
-
-
-# --- CREATE YOLOV4 TINY AND YOLOV4 CFG FILES --- #
-cfg_file = [open(yv4tiny_path, 'w'), open(yv4_path, 'w')]
-templates = [yv4tiny_tplt_path, yv4_tplt_path]
-
-max_batches = classes * 2000
-filters = (classes + 5) * 3
-
-for i in range(2):
- filters_lines = []
- filters_last = 0
-
- # first pass - find filters lines to be replaced
- with open(templates[i]) as f:
- for lineno, line in enumerate(f):
- if line.startswith('filters'):
- filters_last = lineno
- elif line.startswith('[yolo]'):
- filters_lines.append(filters_last)
-
- # second pass - copy lines to new cfg with replacement
- with open(templates[i]) as f:
- for lineno, line in enumerate(f):
- if lineno in filters_lines:
- cfg_file[i].write('filters=' + str(filters) + '\n')
- elif line.startswith('classes'):
- cfg_file[i].write('classes=' + str(classes) + '\n')
- elif line.startswith('max_batches'):
- cfg_file[i].write('max_batches=' + str(max_batches) + '\n')
- elif line.startswith('steps='):
- cfg_file[i].write('steps=' + str((max_batches // 10) * 8) + ',' + str((max_batches // 10) * 9) + '\n')
- else:
- cfg_file[i].write(line)
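-
-# For illustration only (not part of the original script): with a hypothetical two-class
-# dataset called "my_objects", the generated my_objects.data would look roughly like
-# this (absolute paths depend on where the script is run):
-#
-#   classes = 2
-#   train = <lasr_path>/config/my_objects/train_my_objects.txt
-#   valid = <lasr_path>/config/my_objects/valid_my_objects.txt
-#   names = <lasr_path>/datasets/my_objects/classes.txt
-#   backup = <lasr_path>/weights/my_objects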
diff --git a/legacy/lasr_object_detection_yolo/train/scripts/train.sh b/legacy/lasr_object_detection_yolo/train/scripts/train.sh
deleted file mode 100755
index 4d1d0e62a..000000000
--- a/legacy/lasr_object_detection_yolo/train/scripts/train.sh
+++ /dev/null
@@ -1,4 +0,0 @@
-#!/bin/bash
-DATASET=$1
-python scripts/generate_config_files.py $DATASET
-./darknet/darknet detector train config/$DATASET/$DATASET.data config/$DATASET/yolov4.cfg pretrained_weights/yolov4.conv.137
\ No newline at end of file