diff --git a/.gitmodules b/.gitmodules index 2f845f4b6..e69de29bb 100644 --- a/.gitmodules +++ b/.gitmodules @@ -1,3 +0,0 @@ -[submodule "common/third_party/aruco_ros"] - path = common/third_party/aruco_ros - url = git@github.com:pal-robotics/aruco_ros.git diff --git a/legacy/lasr_web_server/CMakeLists.txt b/common/helpers/cv2_pcl/CMakeLists.txt similarity index 96% rename from legacy/lasr_web_server/CMakeLists.txt rename to common/helpers/cv2_pcl/CMakeLists.txt index 85fdea9c8..83e2d9e52 100644 --- a/legacy/lasr_web_server/CMakeLists.txt +++ b/common/helpers/cv2_pcl/CMakeLists.txt @@ -1,5 +1,5 @@ cmake_minimum_required(VERSION 3.0.2) -project(lasr_web_server) +project(cv2_pcl) ## Compile as C++11, supported in ROS Kinetic and newer # add_compile_options(-std=c++11) @@ -100,7 +100,7 @@ catkin_python_setup() ## DEPENDS: system dependencies of this project that dependent projects also need catkin_package( # INCLUDE_DIRS include -# LIBRARIES lasr_web_server +# LIBRARIES cv2_pcl # CATKIN_DEPENDS other_catkin_pkg # DEPENDS system_lib ) @@ -118,7 +118,7 @@ include_directories( ## Declare a C++ library # add_library(${PROJECT_NAME} -# src/${PROJECT_NAME}/lasr_web_server.cpp +# src/${PROJECT_NAME}/cv2_pcl.cpp # ) ## Add cmake target dependencies of the library @@ -129,7 +129,7 @@ include_directories( ## Declare a C++ executable ## With catkin_make all packages are built within a single CMake context ## The recommended prefix ensures that target names across packages don't collide -# add_executable(${PROJECT_NAME}_node src/lasr_web_server_node.cpp) +# add_executable(${PROJECT_NAME}_node src/cv2_pcl_node.cpp) ## Rename C++ executable without prefix ## The above recommended prefix causes long target names, the following renames the @@ -193,7 +193,7 @@ include_directories( ############# ## Add gtest based cpp test target and link libraries -# catkin_add_gtest(${PROJECT_NAME}-test test/test_lasr_web_server.cpp) +# catkin_add_gtest(${PROJECT_NAME}-test test/test_cv2_pcl.cpp) # if(TARGET ${PROJECT_NAME}-test) # target_link_libraries(${PROJECT_NAME}-test ${PROJECT_NAME}) # endif() diff --git a/legacy/lasr_dialogflow/package.xml b/common/helpers/cv2_pcl/package.xml similarity index 89% rename from legacy/lasr_dialogflow/package.xml rename to common/helpers/cv2_pcl/package.xml index 05993da96..d8335720b 100644 --- a/legacy/lasr_dialogflow/package.xml +++ b/common/helpers/cv2_pcl/package.xml @@ -1,25 +1,25 @@ - lasr_dialogflow + cv2_pcl 0.0.0 - The lasr_dialogflow package + The cv2_pcl package - jared - + Jared Swift - TODO + MIT - @@ -49,6 +49,7 @@ catkin + sensor_msgs diff --git a/legacy/people_detection/train_dataset_model/setup.py b/common/helpers/cv2_pcl/setup.py similarity index 56% rename from legacy/people_detection/train_dataset_model/setup.py rename to common/helpers/cv2_pcl/setup.py index f91f24c03..76e948739 100644 --- a/legacy/people_detection/train_dataset_model/setup.py +++ b/common/helpers/cv2_pcl/setup.py @@ -3,9 +3,6 @@ from distutils.core import setup from catkin_pkg.python_setup import generate_distutils_setup -setup_args = generate_distutils_setup( - packages=['train_dataset_model'], - package_dir={'': 'src'} -) +setup_args = generate_distutils_setup(packages=["cv2_pcl"], package_dir={"": "src"}) setup(**setup_args) diff --git a/legacy/common_math/src/common_math/math_.py b/common/helpers/cv2_pcl/src/cv2_pcl/__init__.py old mode 100755 new mode 100644 similarity index 50% rename from legacy/common_math/src/common_math/math_.py rename to common/helpers/cv2_pcl/src/cv2_pcl/__init__.py
index cff4af892..a0537579e --- a/legacy/common_math/src/common_math/math_.py +++ b/common/helpers/cv2_pcl/src/cv2_pcl/__init__.py @@ -1,34 +1,19 @@ -#!/usr/bin/python3 -import numpy as np -import rospy import numpy as np +from sensor_msgs.msg import PointCloud2 import ros_numpy as rnp -# import cv2 -# from geometry_msgs.msg import PointStamped, Point -# from std_msgs.msg import Header -# from robocup_receptionist.srv import TfTransform, TfTransformRequest -import math import cv2 +Mat = np.ndarray -def pcl_msg_to_cv2(pcl_msg): +def pcl_to_cv2(pcl: PointCloud2) -> Mat: """ - Constructs a cv2 image from a PointCloud2 message. - - Parameters - ---------- - pcl_msg : sensor_msgs/PointCloud2 - Input pointcloud (organised) - - Returns - ------- - np.array : cv2 image + Convert a given PointCloud2 message to a cv2 image """ # Extract rgb image from pointcloud - frame = np.fromstring(pcl_msg.data, dtype=np.uint8) - frame = frame.reshape(pcl_msg.height, pcl_msg.width, 32) + frame = np.frombuffer(pcl.data, dtype=np.uint8) + frame = frame.reshape(pcl.height, pcl.width, 32) frame = frame[:, :, 16:19] # Ensure array is contiguous @@ -37,12 +22,16 @@ def pcl_msg_to_cv2(pcl_msg): return frame -def seg_to_centroid(pcl_msg, xyseg): +def seg_to_centroid(pcl: PointCloud2, xyseg: np.ndarray) -> np.ndarray: + """ + Computes the centroid of a given segment in a pointcloud + """ + # Convert xyseg to contours contours = xyseg.reshape(-1, 2) # cv2 image - cv_im = pcl_msg_to_cv2(pcl_msg) + cv_im = pcl_to_cv2(pcl) # Compute mask from contours mask = np.zeros(shape=cv_im.shape[:2]) cv2.fillPoly(mask, pts=[contours], color=(255, 255, 255)) @@ -54,20 +43,24 @@ return np.full(3, np.nan) # Unpack pointcloud into xyz array - pcl_xyz = rnp.point_cloud2.pointcloud2_to_xyz_array(pcl_msg, remove_nans=False) + pcl_xyz = rnp.point_cloud2.pointcloud2_to_xyz_array(pcl, remove_nans=False) # Extract points of interest xyz_points = [pcl_xyz[x][y] for x, y in indices] - return np.nanmean(xyz_points, axis=0) + return np.mean(xyz_points, axis=0) + +def bb_to_centroid(pcl: PointCloud2, x: int, y: int, w: int, h: int) -> np.ndarray: + """ + Computes the centroid of a given bounding box in a pointcloud + """ -def bb_to_centroid(pcl_msg, x, y, w, h): # Convert xywh to bounding box coordinates. x1, y1, x2, y2 = x, y, x + w, y + h # cv2 image - frame = pcl_msg_to_cv2(pcl_msg) + frame = pcl_to_cv2(pcl) # Compute mask from bounding box mask = np.zeros(shape=frame.shape[:2]) mask[y1:y2, x1:x2] = 255 # bounding box dimensions @@ -78,27 +71,10 @@ def bb_to_centroid(pcl_msg, x, y, w, h): return np.full(3, np.nan) # Unpack pointcloud into xyz array - pcl_xyz = rnp.point_cloud2.pointcloud2_to_xyz_array(pcl_msg, remove_nans=False) + pcl_xyz = rnp.point_cloud2.pointcloud2_to_xyz_array(pcl, remove_nans=False) # Extract points of interest xyz_points = [pcl_xyz[x][y] for x, y in indices] # Get mean (centroid) point.
return np.nanmean(xyz_points, axis=0) - - -def euclidian(a, b): - ''' - Get the Euclidean distance between two positions - ''' - return math.sqrt( - math.pow(a.x - b.x, 2) + - math.pow(a.y - b.y, 2) + - math.pow(a.z - b.z, 2) - ) -def euclidian_distance(p1, p2): - x1, y1 = p1 - x2, y2 = p2 - a = np.array((x1, y1)) - b = np.array((x2, y2)) - return np.linalg.norm(a - b) diff --git a/legacy/markers/CMakeLists.txt b/common/helpers/markers/CMakeLists.txt similarity index 97% rename from legacy/markers/CMakeLists.txt rename to common/helpers/markers/CMakeLists.txt index eb31859f0..d0c2c2d41 100644 --- a/legacy/markers/CMakeLists.txt +++ b/common/helpers/markers/CMakeLists.txt @@ -8,11 +8,9 @@ project(markers) ## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz) ## is used, also find other catkin packages find_package(catkin REQUIRED COMPONENTS - geometry_msgs - message_generation - roscpp rospy - std_msgs + visualization_msgs + geometry_msgs ) ## System dependencies are found with CMake's conventions @@ -22,7 +20,7 @@ find_package(catkin REQUIRED COMPONENTS ## Uncomment this if the package has a setup.py. This macro ensures ## modules and global scripts declared therein get installed ## See http://ros.org/doc/api/catkin/html/user_guide/setup_dot_py.html catkin_python_setup() ################################################ ## Declare ROS messages, services and actions ## ################################################ @@ -72,7 +70,7 @@ find_package(catkin REQUIRED COMPONENTS ## Generate added messages and services with any dependencies listed here # generate_messages( # DEPENDENCIES -# geometry_msgs# std_msgs +# visualization_msgs # ) ################################################ @@ -107,7 +105,7 @@ find_package(catkin REQUIRED COMPONENTS catkin_package( # INCLUDE_DIRS include # LIBRARIES markers -# CATKIN_DEPENDS geometry_msgs message_generation roscpp rospy std_msgs +# CATKIN_DEPENDS rospy visualization_msgs # DEPENDS system_lib ) diff --git a/legacy/markers/package.xml b/common/helpers/markers/package.xml similarity index 87% rename from legacy/markers/package.xml rename to common/helpers/markers/package.xml index 2fcc15ea2..dc51b8f73 100644 --- a/legacy/markers/package.xml +++ b/common/helpers/markers/package.xml @@ -7,13 +7,13 @@ - nicole + Jared Swift - TODO + MIT @@ -49,19 +49,15 @@ catkin - geometry_msgs - message_generation - roscpp rospy - std_msgs - geometry_msgs - roscpp + visualization_msgs + geometry_msgs rospy - std_msgs - geometry_msgs - roscpp + visualization_msgs + geometry_msgs rospy - std_msgs + visualization_msgs + geometry_msgs diff --git a/common/helpers/tf_module/setup.py b/common/helpers/markers/setup.py similarity index 52% rename from common/helpers/tf_module/setup.py rename to common/helpers/markers/setup.py index dcfac4bf8..69e7398b0 100644 --- a/common/helpers/tf_module/setup.py +++ b/common/helpers/markers/setup.py @@ -3,10 +3,6 @@ from distutils.core import setup from catkin_pkg.python_setup import generate_distutils_setup -# fetch values from package.xml -setup_args = generate_distutils_setup( - packages=['tf_module'], - package_dir={'': 'src'} -) +setup_args = generate_distutils_setup(packages=["markers"], package_dir={"": "src"}) setup(**setup_args) diff --git a/common/helpers/markers/src/markers/__init__.py b/common/helpers/markers/src/markers/__init__.py new file mode 100644 index 000000000..996b83689 --- /dev/null +++ b/common/helpers/markers/src/markers/__init__.py @@ -0,0 +1,50 @@ +import rospy + +from visualization_msgs.msg import Marker +from
geometry_msgs.msg import PointStamped + +from typing import Optional +from collections import defaultdict + +publisher_counts = defaultdict(int) + + +def create_marker( + point_stamped: PointStamped, + idx: int, + r: float = 0.0, + g: float = 1.0, + b: float = 0.0, +): + marker_msg = Marker() + marker_msg.header.frame_id = point_stamped.header.frame_id + marker_msg.header.stamp = point_stamped.header.stamp + marker_msg.id = idx + marker_msg.type = Marker.SPHERE + marker_msg.action = Marker.ADD + marker_msg.pose.position = point_stamped.point + marker_msg.pose.orientation.w = 1.0 + marker_msg.scale.x = 0.1 + marker_msg.scale.y = 0.1 + marker_msg.scale.z = 0.1 + marker_msg.color.a = 1.0 + marker_msg.color.r = r + marker_msg.color.g = g + marker_msg.color.b = b + return marker_msg + + +def create_and_publish_marker( + publisher: rospy.Publisher, + point_stamped: PointStamped, + idx: Optional[int] = None, + r: float = 0.0, + g: float = 1.0, + b: float = 0.0, +): + if idx is None: + global publisher_counts + idx = publisher_counts[publisher] + publisher_counts[publisher] += 1 + + marker_msg = create_marker(point_stamped, idx, r, g, b) + publisher.publish(marker_msg) diff --git a/common/helpers/tf_module/CMakeLists.txt b/common/helpers/tf_module/CMakeLists.txt deleted file mode 100644 index 4867b5a6f..000000000 --- a/common/helpers/tf_module/CMakeLists.txt +++ /dev/null @@ -1,212 +0,0 @@ -cmake_minimum_required(VERSION 3.0.2) -project(tf_module) - -## Compile as C++11, supported in ROS Kinetic and newer -# add_compile_options(-std=c++11) - -## Find catkin macros and libraries -## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz) -## is used, also find other catkin packages -find_package(catkin REQUIRED COMPONENTS - geometry_msgs - message_generation - roscpp - rospy - sensor_msgs - std_msgs -) - -## System dependencies are found with CMake's conventions -# find_package(Boost REQUIRED COMPONENTS system) - - -## Uncomment this if the package has a setup.py. This macro ensures -## modules and global scripts declared therein get installed -## See http://ros.org/doc/api/catkin/html/user_guide/setup_dot_py.html -catkin_python_setup() - -################################################ ## Declare ROS messages, services and actions ## ################################################ - -## To declare and build messages, services or actions from within this -## package, follow these steps: -## * Let MSG_DEP_SET be the set of packages whose message types you use in -## your messages/services/actions (e.g. std_msgs, actionlib_msgs, ...). -## * In the file package.xml: -## * add a build_depend tag for "message_generation" -## * add a build_depend and a exec_depend tag for each package in MSG_DEP_SET -## * If MSG_DEP_SET isn't empty the following dependency has been pulled in -## but can be declared for certainty nonetheless: -## * add a exec_depend tag for "message_runtime" -## * In this file (CMakeLists.txt): -## * add "message_generation" and every package in MSG_DEP_SET to -## find_package(catkin REQUIRED COMPONENTS ...) -## * add "message_runtime" and every package in MSG_DEP_SET to -## catkin_package(CATKIN_DEPENDS ...) -## * uncomment the add_*_files sections below as needed -## and list every .msg/.srv/.action file to be processed -## * uncomment the generate_messages entry below -## * add every package in MSG_DEP_SET to generate_messages(DEPENDENCIES ...)
- -## Generate messages in the 'msg' folder -# add_message_files( -# FILES -# Message1.msg -# Message2.msg -# ) - -## Generate services in the 'srv' folder - add_service_files( - FILES - TfTransform.srv - BaseTransform.srv - LatestTransform.srv - ApplyTransform.srv - ) - -## Generate actions in the 'action' folder -# add_action_files( -# FILES -# Action1.action -# Action2.action -# ) - -## Generate added messages and services with any dependencies listed here - generate_messages( - DEPENDENCIES - geometry_msgs sensor_msgs std_msgs - ) - -################################################ -## Declare ROS dynamic reconfigure parameters ## -################################################ - -## To declare and build dynamic reconfigure parameters within this -## package, follow these steps: -## * In the file package.xml: -## * add a build_depend and a exec_depend tag for "dynamic_reconfigure" -## * In this file (CMakeLists.txt): -## * add "dynamic_reconfigure" to -## find_package(catkin REQUIRED COMPONENTS ...) -## * uncomment the "generate_dynamic_reconfigure_options" section below -## and list every .cfg file to be processed - -## Generate dynamic reconfigure parameters in the 'cfg' folder -# generate_dynamic_reconfigure_options( -# cfg/DynReconf1.cfg -# cfg/DynReconf2.cfg -# ) - -################################### -## catkin specific configuration ## -################################### -## The catkin_package macro generates cmake config files for your package -## Declare things to be passed to dependent projects -## INCLUDE_DIRS: uncomment this if your package contains header files -## LIBRARIES: libraries you create in this project that dependent projects also need -## CATKIN_DEPENDS: catkin_packages dependent projects also need -## DEPENDS: system dependencies of this project that dependent projects also need -catkin_package( -# INCLUDE_DIRS include -# LIBRARIES tf_module -# CATKIN_DEPENDS geometry_msgs message_generation roscpp rospy senson_msgs std_msgs -# DEPENDS system_lib -) - -########### -## Build ## -########### - -## Specify additional locations of header files -## Your package locations should be listed before other locations -include_directories( -# include - ${catkin_INCLUDE_DIRS} -) - -## Declare a C++ library -# add_library(${PROJECT_NAME} -# src/${PROJECT_NAME}/tf_module.cpp -# ) - -## Add cmake target dependencies of the library -## as an example, code may need to be generated before libraries -## either from message generation or dynamic reconfigure -# add_dependencies(${PROJECT_NAME} ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS}) - -## Declare a C++ executable -## With catkin_make all packages are built within a single CMake context -## The recommended prefix ensures that target names across packages don't collide -# add_executable(${PROJECT_NAME}_node src/tf_module_node.cpp) - -## Rename C++ executable without prefix -## The above recommended prefix causes long target names, the following renames the -## target back to the shorter version for ease of user use -## e.g. 
"rosrun someones_pkg node" instead of "rosrun someones_pkg someones_pkg_node" -# set_target_properties(${PROJECT_NAME}_node PROPERTIES OUTPUT_NAME node PREFIX "") - -## Add cmake target dependencies of the executable -## same as for the library above -# add_dependencies(${PROJECT_NAME}_node ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS}) - -## Specify libraries to link a library or executable target against -# target_link_libraries(${PROJECT_NAME}_node -# ${catkin_LIBRARIES} -# ) - -############# -## Install ## -############# - -# all install targets should use catkin DESTINATION variables -# See http://ros.org/doc/api/catkin/html/adv_user_guide/variables.html - -## Mark executable scripts (Python etc.) for installation -## in contrast to setup.py, you can choose the destination -#catkin_install_python(PROGRAMS - # src/tf_module/tf_transforms.py - #DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} -#) - - -## Mark executables for installation -## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_executables.html -# install(TARGETS ${PROJECT_NAME}_node -# RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} -# ) - -## Mark libraries for installation -## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_libraries.html -# install(TARGETS ${PROJECT_NAME} -# ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} -# LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} -# RUNTIME DESTINATION ${CATKIN_GLOBAL_BIN_DESTINATION} -# ) - -## Mark cpp header files for installation -# install(DIRECTORY include/${PROJECT_NAME}/ -# DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION} -# FILES_MATCHING PATTERN "*.h" -# PATTERN ".svn" EXCLUDE -# ) - -## Mark other files for installation (e.g. launch and bag files, etc.) -# install(FILES -# # myfile1 -# # myfile2 -# DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION} -# ) - -############# -## Testing ## -############# - -## Add gtest based cpp test target and link libraries -# catkin_add_gtest(${PROJECT_NAME}-test test/test_tf_module.cpp) -# if(TARGET ${PROJECT_NAME}-test) -# target_link_libraries(${PROJECT_NAME}-test ${PROJECT_NAME}) -# endif() - -## Add folders to be run by python nosetests -# catkin_add_nosetests(test) diff --git a/common/helpers/tf_module/README.md b/common/helpers/tf_module/README.md deleted file mode 100644 index 0ee548b5a..000000000 --- a/common/helpers/tf_module/README.md +++ /dev/null @@ -1,123 +0,0 @@ -# tf_module - -The tf_module package - -This package is maintained by: -- [nicole](mailto:nicole@todo.todo) - -## Prerequisites - -This package depends on the following ROS packages: -- geometry_msgs (build) -- message_generation (build) -- roscpp (build) -- rospy (build) -- sensor_msgs (build) -- std_msgs (build) -- geometry_msgs (exec) -- roscpp (exec) -- rospy (exec) -- sensor_msgs (exec) -- std_msgs (exec) -- catkin (buildtool) - -Ask the package maintainer to write or create a blank `doc/PREREQUISITES.md` for their package! - -## Usage - -Ask the package maintainer to write a `doc/USAGE.md` for their package! - -## Example - -Ask the package maintainer to write a `doc/EXAMPLE.md` for their package! - -## Technical Overview - -Ask the package maintainer to write a `doc/TECHNICAL.md` for their package! - -## ROS Definitions - -### Launch Files - -#### `tf_transforms` - -No description provided. - - -### Messages - -This package has no messages. 
- -### Services - -#### `ApplyTransform` - -Request - -| Field | Type | Description | -|:-:|:-:|---| -| points | geometry_msgs/Point[] | point we want to transform | -| transform | geometry_msgs/TransformStamped | the transform we want to use | - -Response - -| Field | Type | Description | -|:-:|:-:|---| -| new_points | geometry_msgs/Point[] | the transformed point | - -#### `TfTransform` - -Request - -| Field | Type | Description | -|:-:|:-:|---| -| pose_array | geometry_msgs/PoseArray | | -| pointcloud | sensor_msgs/PointCloud2 | | -| point | geometry_msgs/PointStamped | | -| target_frame | std_msgs/String | | -| source_frame | std_msgs/String | | - -Response - -| Field | Type | Description | -|:-:|:-:|---| -| target_pose_array | geometry_msgs/PoseArray | | -| target_pointcloud | sensor_msgs/PointCloud2 | | -| target_point | geometry_msgs/PointStamped | | -| translation_and_rotation | geometry_msgs/Pose | | - -#### `LatestTransform` - -Request - -| Field | Type | Description | -|:-:|:-:|---| -| from_frame | string | source frame | -| target_frame | string | target frame | - -Response - -| Field | Type | Description | -|:-:|:-:|---| -| transform | geometry_msgs/TransformStamped | transform | - -#### `BaseTransform` - -Request - -| Field | Type | Description | -|:-:|:-:|---| -| points | geometry_msgs/Point[] | point we want to transform | -| frame | std_msgs/String | the frame of the point | -| target_frame | std_msgs/String | the frame we want to transform the point to | - -Response - -| Field | Type | Description | -|:-:|:-:|---| -| new_points | geometry_msgs/PointStamped[] | the transformed point | - - -### Actions - -This package has no actions. diff --git a/common/helpers/tf_module/launch/tf_transforms.launch b/common/helpers/tf_module/launch/tf_transforms.launch deleted file mode 100644 index bc8f94bc0..000000000 --- a/common/helpers/tf_module/launch/tf_transforms.launch +++ /dev/null @@ -1,3 +0,0 @@ - - - diff --git a/common/helpers/tf_module/package.xml b/common/helpers/tf_module/package.xml deleted file mode 100644 index ef34656ee..000000000 --- a/common/helpers/tf_module/package.xml +++ /dev/null @@ -1,76 +0,0 @@ - - - tf_module - 0.0.0 - The tf_module package - - - - - nicole - - - - - - TODO - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - geometry_msgs - message_generation - roscpp - rospy - sensor_msgs - std_msgs - geometry_msgs - roscpp - rospy - sensor_msgs - std_msgs - geometry_msgs - roscpp - rospy - sensor_msgs - std_msgs - catkin - - - - - - - - diff --git a/common/helpers/tf_module/src/tf_module/__init__.py b/common/helpers/tf_module/src/tf_module/__init__.py deleted file mode 100644 index 644396d55..000000000 --- a/common/helpers/tf_module/src/tf_module/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# from .tf_transforms_base import * \ No newline at end of file diff --git a/common/helpers/tf_module/src/tf_module/tf_transforms_base.py b/common/helpers/tf_module/src/tf_module/tf_transforms_base.py deleted file mode 100755 index 5176ef063..000000000 --- a/common/helpers/tf_module/src/tf_module/tf_transforms_base.py +++ /dev/null @@ -1,221 +0,0 @@ -#!/usr/bin/env python -import rospy -import tf2_ros -import tf2_geometry_msgs -import tf2_sensor_msgs - -from geometry_msgs.msg import PoseStamped, PoseArray, Pose, TransformStamped -from tf_module.srv import TfTransform, TfTransformResponse, LatestTransform, LatestTransformResponse, ApplyTransform, \ - ApplyTransformResponse, BaseTransform, BaseTransformResponse - -from geometry_msgs.msg import PoseStamped, 
Vector3Stamped, PointStamped, WrenchStamped -import PyKDL -import rospy -import tf2_ros -import copy - - -def tf_transform(msg): - print(msg) - tf_response = TfTransformResponse() - if msg.pose_array.header.frame_id != '': - transformation = get_transform(source_frame=msg.pose_array.header.frame_id, target_frame=msg.target_frame.data) - if transformation: - pose_array = PoseArray() - pose_array.header.frame_id = msg.target_frame.data - pose_array.header.stamp = rospy.get_rostime() - - for pose in msg.pose_array.poses: - new_pose = tf2_geometry_msgs.do_transform_pose(PoseStamped(pose=pose), transformation).pose - pose_array.poses.append(new_pose) - - tf_response.target_pose_array = pose_array - else: - print('Error: No transformation') - if msg.pointcloud.header.frame_id != '': - transformation = get_transform(source_frame=msg.pointcloud.header.frame_id, target_frame=msg.target_frame.data, stamp=rospy.Time(0)) - if transformation: - new_pointcloud = tf2_sensor_msgs.do_transform_cloud(msg.pointcloud, transformation) - tf_response.target_pointcloud = new_pointcloud - else: - print('Error: No trasnformation') - if msg.point.header.frame_id != '': - transformation = get_transform(source_frame=msg.point.header.frame_id, target_frame=msg.target_frame.data, stamp=rospy.Time(0)) - if transformation: - try: - new_point = do_transform_point(msg.point, transformation) - tf_response.target_point = new_point - except Exception as e: - print(e) - print("---") - else: - print('Error: No trasnformation') - - p = None - if msg.source_frame.data != '': - transformation = get_transform(source_frame=msg.source_frame.data, target_frame=msg.target_frame.data, stamp=rospy.Time(0)) - print(transformation, ' theirs') - # return [t.x, t.y, t.z], [r.x, r.y, r.z, r.w] - lt_trans, lt_rot = lookupTransform(target_frame=msg.target_frame.data, source_frame=msg.source_frame.data, - time=rospy.Time(0)) - print(lt_trans, lt_rot, ' mine ') - p = Pose() - # return [t.x, t.y, t.z], [r.x, r.y, r.z, r.w] - p.position.x = lt_trans[0] - p.position.y = lt_trans[1] - p.position.z = lt_trans[2] - p.orientation.x = lt_rot[0] - p.orientation.y = lt_rot[1] - p.orientation.z = lt_rot[2] - p.orientation.w = lt_rot[3] - tf_response.translation_and_rotation = p - - return tf_response - - -def strip_leading_slash(s): - return s[1:] if s.startswith("/") else s - - -def lookupTransform(target_frame=None, source_frame=None, time=None): - assert (source_frame and target_frame) - try: - msg = tfBuffer.lookup_transform(strip_leading_slash(target_frame), strip_leading_slash(source_frame), time) - t = msg.transform.translation - r = msg.transform.rotation - return [t.x, t.y, t.z], [r.x, r.y, r.z, r.w] - except (tf2_ros.LookupException, tf2_ros.ConnectivityException, tf2_ros.ExtrapolationException) as e: - print(e) - return None - - -# KDL stuff -def do_transform_point(point, transform): - p = transform_to_kdl(transform) * PyKDL.Vector(point.point.x, point.point.y, point.point.z) - res = PointStamped() - res.point.x = p[0] - res.point.y = p[1] - res.point.z = p[2] - res.header = transform.header - return res - - -tf2_ros.TransformRegistration().add(PointStamped, do_transform_point) - - -def transform_to_kdl(t): - return PyKDL.Frame(PyKDL.Rotation.Quaternion(t.transform.rotation.x, t.transform.rotation.y, - t.transform.rotation.z, t.transform.rotation.w), - PyKDL.Vector(t.transform.translation.x, - t.transform.translation.y, - t.transform.translation.z)) - - -# KDL stuff end - -# base functions for transformations -def transform_point_from_given(point, 
from_frame="base_laser_link", to_frame="map"): - try: - tr = get_transform(from_frame, to_frame, rospy.Time(0)) - new_point = apply_transform(point, tr, is_input_ps=False) - except (tf2_ros.LookupException, tf2_ros.ConnectivityException, tf2_ros.ExtrapolationException) as e: - rospy.logwarn("Failed to transform point.") - rospy.logwarn(e) - new_point = PointStamped() - - return new_point - - -def apply_transform(ps_, transform, is_input_ps=True, target="xtion_rgb_optical_frame"): - """ - Apply transform of PointStamped or Point, with a given transform - """ - # assert (ps_ and transform) - if is_input_ps: - tr_point = do_transform_point(ps_, transform) - else: - ps = PointStamped() - ps.point = ps_ - try: - tr_point = do_transform_point(ps, transform) - except Exception as e: - rospy.logwarn("Failed to transform point.") - rospy.logwarn(e) - print(transform) - return PointStamped() - - return tr_point - - -def get_transform(source_frame, target_frame, stamp): - """ - Converts to target frame - Returns the pose in the target frame - """ - # assert (source_frame and target_frame) - try: - transformation = tfBuffer.lookup_transform(target_frame, source_frame, stamp, rospy.Duration(0.1)) - except (tf2_ros.LookupException, tf2_ros.ConnectivityException, tf2_ros.ExtrapolationException) as e: - print(e) - transformation = TransformStamped() - - return transformation - -# base functions for transformations - -# callbacks -def transform_point_cb(msg): - """ - Simply transform a point given the point and the frame - """ - points = msg.points - from_frame = msg.frame.data - to_frame = msg.target_frame.data - - new_points = [] - for p in points: - new_point = transform_point_from_given(p, from_frame, to_frame) - new_points.append(new_point) - return BaseTransformResponse(new_points) - - -def apply_transform_cb(msg): - """ - The callback to apply transform - """ - rospy.logwarn('Applying transform') - new_p = [] - for p in msg.points: - p_tf = apply_transform(p, msg.transform, is_input_ps=False) - new_p.append(p_tf.point) - return ApplyTransformResponse(new_p) - - -def get_latest_transform_cb(msg): - """ - The callback to get the latest transform - """ - to_frame = msg.target_frame - from_frame = msg.from_frame - t = TransformStamped() - try: - t = get_transform(source_frame=from_frame, target_frame=to_frame, stamp=rospy.Time(0)) - except (tf2_ros.LookupException, tf2_ros.ConnectivityException, tf2_ros.ExtrapolationException) as e: - print(e) - - return LatestTransformResponse(t) - -# callbacks end - -if __name__ == '__main__': - rospy.init_node('tf_transform_node') - s = rospy.Service('tf_transform', TfTransform, tf_transform) - s2 = rospy.Service('latest_transform', LatestTransform, get_latest_transform_cb) - s3 = rospy.Service('apply_transform', ApplyTransform, apply_transform_cb) - s4 = rospy.Service('base_transform', BaseTransform, transform_point_cb) - - tfBuffer = tf2_ros.Buffer() - listener = tf2_ros.TransformListener(tfBuffer) - - rospy.loginfo('Ready to transform!') - rospy.spin() diff --git a/common/helpers/tf_module/src/tf_module/transformations.py b/common/helpers/tf_module/src/tf_module/transformations.py deleted file mode 100755 index 8872cbe84..000000000 --- a/common/helpers/tf_module/src/tf_module/transformations.py +++ /dev/null @@ -1,1705 +0,0 @@ -# -*- coding: utf-8 -*- -# transformations.py - -# Copyright (c) 2006, Christoph Gohlke -# Copyright (c) 2006-2009, The Regents of the University of California -# All rights reserved. 
-# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution. -# * Neither the name of the copyright holders nor the names of any -# contributors may be used to endorse or promote products derived -# from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE -# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -# POSSIBILITY OF SUCH DAMAGE. - -"""Homogeneous Transformation Matrices and Quaternions. - -A library for calculating 4x4 matrices for translating, rotating, reflecting, -scaling, shearing, projecting, orthogonalizing, and superimposing arrays of -3D homogeneous coordinates as well as for converting between rotation matrices, -Euler angles, and quaternions. Also includes an Arcball control object and -functions to decompose transformation matrices. - -:Authors: - `Christoph Gohlke <http://www.lfd.uci.edu/~gohlke/>`__, - Laboratory for Fluorescence Dynamics, University of California, Irvine - -:Version: 20090418 - -Requirements ------------- - -* `Python 2.6 <http://www.python.org>`__ -* `Numpy 1.3 <http://numpy.scipy.org>`__ -* `transformations.c 20090418 <http://www.lfd.uci.edu/~gohlke/>`__ - (optional implementation of some functions in C) - -Notes ------ - -Matrices (M) can be inverted using numpy.linalg.inv(M), concatenated using -numpy.dot(M0, M1), or used to transform homogeneous coordinates (v) using -numpy.dot(M, v) for shape (4, \*) "point of arrays", respectively -numpy.dot(v, M.T) for shape (\*, 4) "array of points". - -Calculations are carried out with numpy.float64 precision. - -This Python implementation is not optimized for speed. - -Vector, point, quaternion, and matrix function arguments are expected to be -"array like", i.e. tuple, list, or numpy arrays. - -Return types are numpy arrays unless specified otherwise. - -Angles are in radians unless specified otherwise. - -Quaternions ix+jy+kz+w are represented as [x, y, z, w]. - -Use the transpose of transformation matrices for OpenGL glMultMatrixd(). - -A triple of Euler angles can be applied/interpreted in 24 ways, which can -be specified using a 4 character string or encoded 4-tuple: - - *Axes 4-string*: e.g. 'sxyz' or 'ryxy' - - - first character : rotations are applied to 's'tatic or 'r'otating frame - - remaining characters : successive rotation axis 'x', 'y', or 'z' - - *Axes 4-tuple*: e.g. (0, 0, 0, 0) or (1, 1, 1, 1) - - - inner axis: code of axis ('x':0, 'y':1, 'z':2) of rightmost matrix.
- - parity : even (0) if inner axis 'x' is followed by 'y', 'y' is followed - by 'z', or 'z' is followed by 'x'. Otherwise odd (1). - - repetition : first and last axis are same (1) or different (0). - - frame : rotations are applied to static (0) or rotating (1) frame. - -References ----------- - -(1) Matrices and transformations. Ronald Goldman. - In "Graphics Gems I", pp 472-475. Morgan Kaufmann, 1990. -(2) More matrices and transformations: shear and pseudo-perspective. - Ronald Goldman. In "Graphics Gems II", pp 320-323. Morgan Kaufmann, 1991. -(3) Decomposing a matrix into simple transformations. Spencer Thomas. - In "Graphics Gems II", pp 320-323. Morgan Kaufmann, 1991. -(4) Recovering the data from the transformation matrix. Ronald Goldman. - In "Graphics Gems II", pp 324-331. Morgan Kaufmann, 1991. -(5) Euler angle conversion. Ken Shoemake. - In "Graphics Gems IV", pp 222-229. Morgan Kaufmann, 1994. -(6) Arcball rotation control. Ken Shoemake. - In "Graphics Gems IV", pp 175-192. Morgan Kaufmann, 1994. -(7) Representing attitude: Euler angles, unit quaternions, and rotation - vectors. James Diebel. 2006. -(8) A discussion of the solution for the best rotation to relate two sets - of vectors. W Kabsch. Acta Cryst. 1978. A34, 827-828. -(9) Closed-form solution of absolute orientation using unit quaternions. - BKP Horn. J Opt Soc Am A. 1987. 4(4), 629-642. -(10) Quaternions. Ken Shoemake. - http://www.sfu.ca/~jwa3/cmpt461/files/quatut.pdf -(11) From quaternion to matrix and back. JMP van Waveren. 2005. - http://www.intel.com/cd/ids/developer/asmo-na/eng/293748.htm -(12) Uniform random rotations. Ken Shoemake. - In "Graphics Gems III", pp 124-132. Morgan Kaufmann, 1992. - - -Examples --------- - ->>> alpha, beta, gamma = 0.123, -1.234, 2.345 ->>> origin, xaxis, yaxis, zaxis = (0, 0, 0), (1, 0, 0), (0, 1, 0), (0, 0, 1) ->>> I = identity_matrix() ->>> Rx = rotation_matrix(alpha, xaxis) ->>> Ry = rotation_matrix(beta, yaxis) ->>> Rz = rotation_matrix(gamma, zaxis) ->>> R = concatenate_matrices(Rx, Ry, Rz) ->>> euler = euler_from_matrix(R, 'rxyz') ->>> numpy.allclose([alpha, beta, gamma], euler) -True ->>> Re = euler_matrix(alpha, beta, gamma, 'rxyz') ->>> is_same_transform(R, Re) -True ->>> al, be, ga = euler_from_matrix(Re, 'rxyz') ->>> is_same_transform(Re, euler_matrix(al, be, ga, 'rxyz')) -True ->>> qx = quaternion_about_axis(alpha, xaxis) ->>> qy = quaternion_about_axis(beta, yaxis) ->>> qz = quaternion_about_axis(gamma, zaxis) ->>> q = quaternion_multiply(qx, qy) ->>> q = quaternion_multiply(q, qz) ->>> Rq = quaternion_matrix(q) ->>> is_same_transform(R, Rq) -True ->>> S = scale_matrix(1.23, origin) ->>> T = translation_matrix((1, 2, 3)) ->>> Z = shear_matrix(beta, xaxis, origin, zaxis) ->>> R = random_rotation_matrix(numpy.random.rand(3)) ->>> M = concatenate_matrices(T, R, Z, S) ->>> scale, shear, angles, trans, persp = decompose_matrix(M) ->>> numpy.allclose(scale, 1.23) -True ->>> numpy.allclose(trans, (1, 2, 3)) -True ->>> numpy.allclose(shear, (0, math.tan(beta), 0)) -True ->>> is_same_transform(R, euler_matrix(axes='sxyz', *angles)) -True ->>> M1 = compose_matrix(scale, shear, angles, trans, persp) ->>> is_same_transform(M, M1) -True - -""" - -from __future__ import division - -import warnings -import math - -import numpy - -# Documentation in HTML format can be generated with Epydoc -__docformat__ = "restructuredtext en" - - -def identity_matrix(): - """Return 4x4 identity/unit matrix. 
- - >>> I = identity_matrix() - >>> numpy.allclose(I, numpy.dot(I, I)) - True - >>> numpy.sum(I), numpy.trace(I) - (4.0, 4.0) - >>> numpy.allclose(I, numpy.identity(4, dtype=numpy.float64)) - True - - """ - return numpy.identity(4, dtype=numpy.float64) - - -def translation_matrix(direction): - """Return matrix to translate by direction vector. - - >>> v = numpy.random.random(3) - 0.5 - >>> numpy.allclose(v, translation_matrix(v)[:3, 3]) - True - - """ - M = numpy.identity(4) - M[:3, 3] = direction[:3] - return M - - -def translation_from_matrix(matrix): - """Return translation vector from translation matrix. - - >>> v0 = numpy.random.random(3) - 0.5 - >>> v1 = translation_from_matrix(translation_matrix(v0)) - >>> numpy.allclose(v0, v1) - True - - """ - return numpy.array(matrix, copy=False)[:3, 3].copy() - - -def reflection_matrix(point, normal): - """Return matrix to mirror at plane defined by point and normal vector. - - >>> v0 = numpy.random.random(4) - 0.5 - >>> v0[3] = 1.0 - >>> v1 = numpy.random.random(3) - 0.5 - >>> R = reflection_matrix(v0, v1) - >>> numpy.allclose(2., numpy.trace(R)) - True - >>> numpy.allclose(v0, numpy.dot(R, v0)) - True - >>> v2 = v0.copy() - >>> v2[:3] += v1 - >>> v3 = v0.copy() - >>> v2[:3] -= v1 - >>> numpy.allclose(v2, numpy.dot(R, v3)) - True - - """ - normal = unit_vector(normal[:3]) - M = numpy.identity(4) - M[:3, :3] -= 2.0 * numpy.outer(normal, normal) - M[:3, 3] = (2.0 * numpy.dot(point[:3], normal)) * normal - return M - - -def reflection_from_matrix(matrix): - """Return mirror plane point and normal vector from reflection matrix. - - >>> v0 = numpy.random.random(3) - 0.5 - >>> v1 = numpy.random.random(3) - 0.5 - >>> M0 = reflection_matrix(v0, v1) - >>> point, normal = reflection_from_matrix(M0) - >>> M1 = reflection_matrix(point, normal) - >>> is_same_transform(M0, M1) - True - - """ - M = numpy.array(matrix, dtype=numpy.float64, copy=False) - # normal: unit eigenvector corresponding to eigenvalue -1 - l, V = numpy.linalg.eig(M[:3, :3]) - i = numpy.where(abs(numpy.real(l) + 1.0) < 1e-8)[0] - if not len(i): - raise ValueError("no unit eigenvector corresponding to eigenvalue -1") - normal = numpy.real(V[:, i[0]]).squeeze() - # point: any unit eigenvector corresponding to eigenvalue 1 - l, V = numpy.linalg.eig(M) - i = numpy.where(abs(numpy.real(l) - 1.0) < 1e-8)[0] - if not len(i): - raise ValueError("no unit eigenvector corresponding to eigenvalue 1") - point = numpy.real(V[:, i[-1]]).squeeze() - point /= point[3] - return point, normal - - -def rotation_matrix(angle, direction, point=None): - """Return matrix to rotate about axis defined by point and direction. - - >>> angle = (random.random() - 0.5) * (2*math.pi) - >>> direc = numpy.random.random(3) - 0.5 - >>> point = numpy.random.random(3) - 0.5 - >>> R0 = rotation_matrix(angle, direc, point) - >>> R1 = rotation_matrix(angle-2*math.pi, direc, point) - >>> is_same_transform(R0, R1) - True - >>> R0 = rotation_matrix(angle, direc, point) - >>> R1 = rotation_matrix(-angle, -direc, point) - >>> is_same_transform(R0, R1) - True - >>> I = numpy.identity(4, numpy.float64) - >>> numpy.allclose(I, rotation_matrix(math.pi*2, direc)) - True - >>> numpy.allclose(2., numpy.trace(rotation_matrix(math.pi/2, - ... 
direc, point))) - True - - """ - sina = math.sin(angle) - cosa = math.cos(angle) - direction = unit_vector(direction[:3]) - # rotation matrix around unit vector - R = numpy.array(((cosa, 0.0, 0.0), - (0.0, cosa, 0.0), - (0.0, 0.0, cosa)), dtype=numpy.float64) - R += numpy.outer(direction, direction) * (1.0 - cosa) - direction *= sina - R += numpy.array((( 0.0, -direction[2], direction[1]), - ( direction[2], 0.0, -direction[0]), - (-direction[1], direction[0], 0.0)), - dtype=numpy.float64) - M = numpy.identity(4) - M[:3, :3] = R - if point is not None: - # rotation not around origin - point = numpy.array(point[:3], dtype=numpy.float64, copy=False) - M[:3, 3] = point - numpy.dot(R, point) - return M - - -def rotation_from_matrix(matrix): - """Return rotation angle and axis from rotation matrix. - - >>> angle = (random.random() - 0.5) * (2*math.pi) - >>> direc = numpy.random.random(3) - 0.5 - >>> point = numpy.random.random(3) - 0.5 - >>> R0 = rotation_matrix(angle, direc, point) - >>> angle, direc, point = rotation_from_matrix(R0) - >>> R1 = rotation_matrix(angle, direc, point) - >>> is_same_transform(R0, R1) - True - - """ - R = numpy.array(matrix, dtype=numpy.float64, copy=False) - R33 = R[:3, :3] - # direction: unit eigenvector of R33 corresponding to eigenvalue of 1 - l, W = numpy.linalg.eig(R33.T) - i = numpy.where(abs(numpy.real(l) - 1.0) < 1e-8)[0] - if not len(i): - raise ValueError("no unit eigenvector corresponding to eigenvalue 1") - direction = numpy.real(W[:, i[-1]]).squeeze() - # point: unit eigenvector of R33 corresponding to eigenvalue of 1 - l, Q = numpy.linalg.eig(R) - i = numpy.where(abs(numpy.real(l) - 1.0) < 1e-8)[0] - if not len(i): - raise ValueError("no unit eigenvector corresponding to eigenvalue 1") - point = numpy.real(Q[:, i[-1]]).squeeze() - point /= point[3] - # rotation angle depending on direction - cosa = (numpy.trace(R33) - 1.0) / 2.0 - if abs(direction[2]) > 1e-8: - sina = (R[1, 0] + (cosa-1.0)*direction[0]*direction[1]) / direction[2] - elif abs(direction[1]) > 1e-8: - sina = (R[0, 2] + (cosa-1.0)*direction[0]*direction[2]) / direction[1] - else: - sina = (R[2, 1] + (cosa-1.0)*direction[1]*direction[2]) / direction[0] - angle = math.atan2(sina, cosa) - return angle, direction, point - - -def scale_matrix(factor, origin=None, direction=None): - """Return matrix to scale by factor around origin in direction. - - Use factor -1 for point symmetry. - - >>> v = (numpy.random.rand(4, 5) - 0.5) * 20.0 - >>> v[3] = 1.0 - >>> S = scale_matrix(-1.234) - >>> numpy.allclose(numpy.dot(S, v)[:3], -1.234*v[:3]) - True - >>> factor = random.random() * 10 - 5 - >>> origin = numpy.random.random(3) - 0.5 - >>> direct = numpy.random.random(3) - 0.5 - >>> S = scale_matrix(factor, origin) - >>> S = scale_matrix(factor, origin, direct) - - """ - if direction is None: - # uniform scaling - M = numpy.array(((factor, 0.0, 0.0, 0.0), - (0.0, factor, 0.0, 0.0), - (0.0, 0.0, factor, 0.0), - (0.0, 0.0, 0.0, 1.0)), dtype=numpy.float64) - if origin is not None: - M[:3, 3] = origin[:3] - M[:3, 3] *= 1.0 - factor - else: - # nonuniform scaling - direction = unit_vector(direction[:3]) - factor = 1.0 - factor - M = numpy.identity(4) - M[:3, :3] -= factor * numpy.outer(direction, direction) - if origin is not None: - M[:3, 3] = (factor * numpy.dot(origin[:3], direction)) * direction - return M - - -def scale_from_matrix(matrix): - """Return scaling factor, origin and direction from scaling matrix. 
- - >>> factor = random.random() * 10 - 5 - >>> origin = numpy.random.random(3) - 0.5 - >>> direct = numpy.random.random(3) - 0.5 - >>> S0 = scale_matrix(factor, origin) - >>> factor, origin, direction = scale_from_matrix(S0) - >>> S1 = scale_matrix(factor, origin, direction) - >>> is_same_transform(S0, S1) - True - >>> S0 = scale_matrix(factor, origin, direct) - >>> factor, origin, direction = scale_from_matrix(S0) - >>> S1 = scale_matrix(factor, origin, direction) - >>> is_same_transform(S0, S1) - True - - """ - M = numpy.array(matrix, dtype=numpy.float64, copy=False) - M33 = M[:3, :3] - factor = numpy.trace(M33) - 2.0 - try: - # direction: unit eigenvector corresponding to eigenvalue factor - l, V = numpy.linalg.eig(M33) - i = numpy.where(abs(numpy.real(l) - factor) < 1e-8)[0][0] - direction = numpy.real(V[:, i]).squeeze() - direction /= vector_norm(direction) - except IndexError: - # uniform scaling - factor = (factor + 2.0) / 3.0 - direction = None - # origin: any eigenvector corresponding to eigenvalue 1 - l, V = numpy.linalg.eig(M) - i = numpy.where(abs(numpy.real(l) - 1.0) < 1e-8)[0] - if not len(i): - raise ValueError("no eigenvector corresponding to eigenvalue 1") - origin = numpy.real(V[:, i[-1]]).squeeze() - origin /= origin[3] - return factor, origin, direction - - -def projection_matrix(point, normal, direction=None, - perspective=None, pseudo=False): - """Return matrix to project onto plane defined by point and normal. - - Using either perspective point, projection direction, or none of both. - - If pseudo is True, perspective projections will preserve relative depth - such that Perspective = dot(Orthogonal, PseudoPerspective). - - >>> P = projection_matrix((0, 0, 0), (1, 0, 0)) - >>> numpy.allclose(P[1:, 1:], numpy.identity(4)[1:, 1:]) - True - >>> point = numpy.random.random(3) - 0.5 - >>> normal = numpy.random.random(3) - 0.5 - >>> direct = numpy.random.random(3) - 0.5 - >>> persp = numpy.random.random(3) - 0.5 - >>> P0 = projection_matrix(point, normal) - >>> P1 = projection_matrix(point, normal, direction=direct) - >>> P2 = projection_matrix(point, normal, perspective=persp) - >>> P3 = projection_matrix(point, normal, perspective=persp, pseudo=True) - >>> is_same_transform(P2, numpy.dot(P0, P3)) - True - >>> P = projection_matrix((3, 0, 0), (1, 1, 0), (1, 0, 0)) - >>> v0 = (numpy.random.rand(4, 5) - 0.5) * 20.0 - >>> v0[3] = 1.0 - >>> v1 = numpy.dot(P, v0) - >>> numpy.allclose(v1[1], v0[1]) - True - >>> numpy.allclose(v1[0], 3.0-v1[1]) - True - - """ - M = numpy.identity(4) - point = numpy.array(point[:3], dtype=numpy.float64, copy=False) - normal = unit_vector(normal[:3]) - if perspective is not None: - # perspective projection - perspective = numpy.array(perspective[:3], dtype=numpy.float64, - copy=False) - M[0, 0] = M[1, 1] = M[2, 2] = numpy.dot(perspective-point, normal) - M[:3, :3] -= numpy.outer(perspective, normal) - if pseudo: - # preserve relative depth - M[:3, :3] -= numpy.outer(normal, normal) - M[:3, 3] = numpy.dot(point, normal) * (perspective+normal) - else: - M[:3, 3] = numpy.dot(point, normal) * perspective - M[3, :3] = -normal - M[3, 3] = numpy.dot(perspective, normal) - elif direction is not None: - # parallel projection - direction = numpy.array(direction[:3], dtype=numpy.float64, copy=False) - scale = numpy.dot(direction, normal) - M[:3, :3] -= numpy.outer(direction, normal) / scale - M[:3, 3] = direction * (numpy.dot(point, normal) / scale) - else: - # orthogonal projection - M[:3, :3] -= numpy.outer(normal, normal) - M[:3, 3] = numpy.dot(point, 
normal) * normal - return M - - -def projection_from_matrix(matrix, pseudo=False): - """Return projection plane and perspective point from projection matrix. - - Return values are same as arguments for projection_matrix function: - point, normal, direction, perspective, and pseudo. - - >>> point = numpy.random.random(3) - 0.5 - >>> normal = numpy.random.random(3) - 0.5 - >>> direct = numpy.random.random(3) - 0.5 - >>> persp = numpy.random.random(3) - 0.5 - >>> P0 = projection_matrix(point, normal) - >>> result = projection_from_matrix(P0) - >>> P1 = projection_matrix(*result) - >>> is_same_transform(P0, P1) - True - >>> P0 = projection_matrix(point, normal, direct) - >>> result = projection_from_matrix(P0) - >>> P1 = projection_matrix(*result) - >>> is_same_transform(P0, P1) - True - >>> P0 = projection_matrix(point, normal, perspective=persp, pseudo=False) - >>> result = projection_from_matrix(P0, pseudo=False) - >>> P1 = projection_matrix(*result) - >>> is_same_transform(P0, P1) - True - >>> P0 = projection_matrix(point, normal, perspective=persp, pseudo=True) - >>> result = projection_from_matrix(P0, pseudo=True) - >>> P1 = projection_matrix(*result) - >>> is_same_transform(P0, P1) - True - - """ - M = numpy.array(matrix, dtype=numpy.float64, copy=False) - M33 = M[:3, :3] - l, V = numpy.linalg.eig(M) - i = numpy.where(abs(numpy.real(l) - 1.0) < 1e-8)[0] - if not pseudo and len(i): - # point: any eigenvector corresponding to eigenvalue 1 - point = numpy.real(V[:, i[-1]]).squeeze() - point /= point[3] - # direction: unit eigenvector corresponding to eigenvalue 0 - l, V = numpy.linalg.eig(M33) - i = numpy.where(abs(numpy.real(l)) < 1e-8)[0] - if not len(i): - raise ValueError("no eigenvector corresponding to eigenvalue 0") - direction = numpy.real(V[:, i[0]]).squeeze() - direction /= vector_norm(direction) - # normal: unit eigenvector of M33.T corresponding to eigenvalue 0 - l, V = numpy.linalg.eig(M33.T) - i = numpy.where(abs(numpy.real(l)) < 1e-8)[0] - if len(i): - # parallel projection - normal = numpy.real(V[:, i[0]]).squeeze() - normal /= vector_norm(normal) - return point, normal, direction, None, False - else: - # orthogonal projection, where normal equals direction vector - return point, direction, None, None, False - else: - # perspective projection - i = numpy.where(abs(numpy.real(l)) > 1e-8)[0] - if not len(i): - raise ValueError( - "no eigenvector not corresponding to eigenvalue 0") - point = numpy.real(V[:, i[-1]]).squeeze() - point /= point[3] - normal = - M[3, :3] - perspective = M[:3, 3] / numpy.dot(point[:3], normal) - if pseudo: - perspective -= normal - return point, normal, None, perspective, pseudo - - -def clip_matrix(left, right, bottom, top, near, far, perspective=False): - """Return matrix to obtain normalized device coordinates from frustrum. - - The frustrum bounds are axis-aligned along x (left, right), - y (bottom, top) and z (near, far). - - Normalized device coordinates are in range [-1, 1] if coordinates are - inside the frustrum. - - If perspective is True the frustrum is a truncated pyramid with the - perspective point at origin and direction along z axis, otherwise an - orthographic canonical view volume (a box). - - Homogeneous coordinates transformed by the perspective clip matrix - need to be dehomogenized (devided by w coordinate). 
- - >>> frustrum = numpy.random.rand(6) - >>> frustrum[1] += frustrum[0] - >>> frustrum[3] += frustrum[2] - >>> frustrum[5] += frustrum[4] - >>> M = clip_matrix(*frustrum, perspective=False) - >>> numpy.dot(M, [frustrum[0], frustrum[2], frustrum[4], 1.0]) - array([-1., -1., -1., 1.]) - >>> numpy.dot(M, [frustrum[1], frustrum[3], frustrum[5], 1.0]) - array([ 1., 1., 1., 1.]) - >>> M = clip_matrix(*frustrum, perspective=True) - >>> v = numpy.dot(M, [frustrum[0], frustrum[2], frustrum[4], 1.0]) - >>> v / v[3] - array([-1., -1., -1., 1.]) - >>> v = numpy.dot(M, [frustrum[1], frustrum[3], frustrum[4], 1.0]) - >>> v / v[3] - array([ 1., 1., -1., 1.]) - - """ - if left >= right or bottom >= top or near >= far: - raise ValueError("invalid frustrum") - if perspective: - if near <= _EPS: - raise ValueError("invalid frustrum: near <= 0") - t = 2.0 * near - M = ((-t/(right-left), 0.0, (right+left)/(right-left), 0.0), - (0.0, -t/(top-bottom), (top+bottom)/(top-bottom), 0.0), - (0.0, 0.0, -(far+near)/(far-near), t*far/(far-near)), - (0.0, 0.0, -1.0, 0.0)) - else: - M = ((2.0/(right-left), 0.0, 0.0, (right+left)/(left-right)), - (0.0, 2.0/(top-bottom), 0.0, (top+bottom)/(bottom-top)), - (0.0, 0.0, 2.0/(far-near), (far+near)/(near-far)), - (0.0, 0.0, 0.0, 1.0)) - return numpy.array(M, dtype=numpy.float64) - - -def shear_matrix(angle, direction, point, normal): - """Return matrix to shear by angle along direction vector on shear plane. - - The shear plane is defined by a point and normal vector. The direction - vector must be orthogonal to the plane's normal vector. - - A point P is transformed by the shear matrix into P" such that - the vector P-P" is parallel to the direction vector and its extent is - given by the angle of P-P'-P", where P' is the orthogonal projection - of P onto the shear plane. - - >>> angle = (random.random() - 0.5) * 4*math.pi - >>> direct = numpy.random.random(3) - 0.5 - >>> point = numpy.random.random(3) - 0.5 - >>> normal = numpy.cross(direct, numpy.random.random(3)) - >>> S = shear_matrix(angle, direct, point, normal) - >>> numpy.allclose(1.0, numpy.linalg.det(S)) - True - - """ - normal = unit_vector(normal[:3]) - direction = unit_vector(direction[:3]) - if abs(numpy.dot(normal, direction)) > 1e-6: - raise ValueError("direction and normal vectors are not orthogonal") - angle = math.tan(angle) - M = numpy.identity(4) - M[:3, :3] += angle * numpy.outer(direction, normal) - M[:3, 3] = -angle * numpy.dot(point[:3], normal) * direction - return M - - -def shear_from_matrix(matrix): - """Return shear angle, direction and plane from shear matrix. 
- - >>> angle = (random.random() - 0.5) * 4*math.pi - >>> direct = numpy.random.random(3) - 0.5 - >>> point = numpy.random.random(3) - 0.5 - >>> normal = numpy.cross(direct, numpy.random.random(3)) - >>> S0 = shear_matrix(angle, direct, point, normal) - >>> angle, direct, point, normal = shear_from_matrix(S0) - >>> S1 = shear_matrix(angle, direct, point, normal) - >>> is_same_transform(S0, S1) - True - - """ - M = numpy.array(matrix, dtype=numpy.float64, copy=False) - M33 = M[:3, :3] - # normal: cross independent eigenvectors corresponding to the eigenvalue 1 - l, V = numpy.linalg.eig(M33) - i = numpy.where(abs(numpy.real(l) - 1.0) < 1e-4)[0] - if len(i) < 2: - raise ValueError("No two linear independent eigenvectors found {}".format(l)) - V = numpy.real(V[:, i]).squeeze().T - lenorm = -1.0 - for i0, i1 in ((0, 1), (0, 2), (1, 2)): - n = numpy.cross(V[i0], V[i1]) - l = vector_norm(n) - if l > lenorm: - lenorm = l - normal = n - normal /= lenorm - # direction and angle - direction = numpy.dot(M33 - numpy.identity(3), normal) - angle = vector_norm(direction) - direction /= angle - angle = math.atan(angle) - # point: eigenvector corresponding to eigenvalue 1 - l, V = numpy.linalg.eig(M) - i = numpy.where(abs(numpy.real(l) - 1.0) < 1e-8)[0] - if not len(i): - raise ValueError("no eigenvector corresponding to eigenvalue 1") - point = numpy.real(V[:, i[-1]]).squeeze() - point /= point[3] - return angle, direction, point, normal - - -def decompose_matrix(matrix): - """Return sequence of transformations from transformation matrix. - - matrix : array_like - Non-degenerative homogeneous transformation matrix - - Return tuple of: - scale : vector of 3 scaling factors - shear : list of shear factors for x-y, x-z, y-z axes - angles : list of Euler angles about static x, y, z axes - translate : translation vector along x, y, z axes - perspective : perspective partition of matrix - - Raise ValueError if matrix is of wrong type or degenerative. 
- - >>> T0 = translation_matrix((1, 2, 3)) - >>> scale, shear, angles, trans, persp = decompose_matrix(T0) - >>> T1 = translation_matrix(trans) - >>> numpy.allclose(T0, T1) - True - >>> S = scale_matrix(0.123) - >>> scale, shear, angles, trans, persp = decompose_matrix(S) - >>> scale[0] - 0.123 - >>> R0 = euler_matrix(1, 2, 3) - >>> scale, shear, angles, trans, persp = decompose_matrix(R0) - >>> R1 = euler_matrix(*angles) - >>> numpy.allclose(R0, R1) - True - - """ - M = numpy.array(matrix, dtype=numpy.float64, copy=True).T - if abs(M[3, 3]) < _EPS: - raise ValueError("M[3, 3] is zero") - M /= M[3, 3] - P = M.copy() - P[:, 3] = 0, 0, 0, 1 - if not numpy.linalg.det(P): - raise ValueError("Matrix is singular") - - scale = numpy.zeros((3, ), dtype=numpy.float64) - shear = [0, 0, 0] - angles = [0, 0, 0] - - if any(abs(M[:3, 3]) > _EPS): - perspective = numpy.dot(M[:, 3], numpy.linalg.inv(P.T)) - M[:, 3] = 0, 0, 0, 1 - else: - perspective = numpy.array((0, 0, 0, 1), dtype=numpy.float64) - - translate = M[3, :3].copy() - M[3, :3] = 0 - - row = M[:3, :3].copy() - scale[0] = vector_norm(row[0]) - row[0] /= scale[0] - shear[0] = numpy.dot(row[0], row[1]) - row[1] -= row[0] * shear[0] - scale[1] = vector_norm(row[1]) - row[1] /= scale[1] - shear[0] /= scale[1] - shear[1] = numpy.dot(row[0], row[2]) - row[2] -= row[0] * shear[1] - shear[2] = numpy.dot(row[1], row[2]) - row[2] -= row[1] * shear[2] - scale[2] = vector_norm(row[2]) - row[2] /= scale[2] - shear[1:] /= scale[2] - - if numpy.dot(row[0], numpy.cross(row[1], row[2])) < 0: - scale *= -1 - row *= -1 - - angles[1] = math.asin(-row[0, 2]) - if math.cos(angles[1]): - angles[0] = math.atan2(row[1, 2], row[2, 2]) - angles[2] = math.atan2(row[0, 1], row[0, 0]) - else: - #angles[0] = math.atan2(row[1, 0], row[1, 1]) - angles[0] = math.atan2(-row[2, 1], row[1, 1]) - angles[2] = 0.0 - - return scale, shear, angles, translate, perspective - - -def compose_matrix(scale=None, shear=None, angles=None, translate=None, - perspective=None): - """Return transformation matrix from sequence of transformations. - - This is the inverse of the decompose_matrix function. 
- - Sequence of transformations: - scale : vector of 3 scaling factors - shear : list of shear factors for x-y, x-z, y-z axes - angles : list of Euler angles about static x, y, z axes - translate : translation vector along x, y, z axes - perspective : perspective partition of matrix - - >>> scale = numpy.random.random(3) - 0.5 - >>> shear = numpy.random.random(3) - 0.5 - >>> angles = (numpy.random.random(3) - 0.5) * (2*math.pi) - >>> trans = numpy.random.random(3) - 0.5 - >>> persp = numpy.random.random(4) - 0.5 - >>> M0 = compose_matrix(scale, shear, angles, trans, persp) - >>> result = decompose_matrix(M0) - >>> M1 = compose_matrix(*result) - >>> is_same_transform(M0, M1) - True - - """ - M = numpy.identity(4) - if perspective is not None: - P = numpy.identity(4) - P[3, :] = perspective[:4] - M = numpy.dot(M, P) - if translate is not None: - T = numpy.identity(4) - T[:3, 3] = translate[:3] - M = numpy.dot(M, T) - if angles is not None: - R = euler_matrix(angles[0], angles[1], angles[2], 'sxyz') - M = numpy.dot(M, R) - if shear is not None: - Z = numpy.identity(4) - Z[1, 2] = shear[2] - Z[0, 2] = shear[1] - Z[0, 1] = shear[0] - M = numpy.dot(M, Z) - if scale is not None: - S = numpy.identity(4) - S[0, 0] = scale[0] - S[1, 1] = scale[1] - S[2, 2] = scale[2] - M = numpy.dot(M, S) - M /= M[3, 3] - return M - - -def orthogonalization_matrix(lengths, angles): - """Return orthogonalization matrix for crystallographic cell coordinates. - - Angles are expected in degrees. - - The de-orthogonalization matrix is the inverse. - - >>> O = orthogonalization_matrix((10., 10., 10.), (90., 90., 90.)) - >>> numpy.allclose(O[:3, :3], numpy.identity(3, float) * 10) - True - >>> O = orthogonalization_matrix([9.8, 12.0, 15.5], [87.2, 80.7, 69.7]) - >>> numpy.allclose(numpy.sum(O), 43.063229) - True - - """ - a, b, c = lengths - angles = numpy.radians(angles) - sina, sinb, _ = numpy.sin(angles) - cosa, cosb, cosg = numpy.cos(angles) - co = (cosa * cosb - cosg) / (sina * sinb) - return numpy.array(( - ( a*sinb*math.sqrt(1.0-co*co), 0.0, 0.0, 0.0), - (-a*sinb*co, b*sina, 0.0, 0.0), - ( a*cosb, b*cosa, c, 0.0), - ( 0.0, 0.0, 0.0, 1.0)), - dtype=numpy.float64) - - -def superimposition_matrix(v0, v1, scaling=False, usesvd=True): - """Return matrix to transform given vector set into second vector set. - - v0 and v1 are shape (3, \*) or (4, \*) arrays of at least 3 vectors. - - If usesvd is True, the weighted sum of squared deviations (RMSD) is - minimized according to the algorithm by W. Kabsch [8]. Otherwise the - quaternion based algorithm by B. Horn [9] is used (slower when using - this Python implementation). - - The returned matrix performs rotation, translation and uniform scaling - (if specified). 
- - >>> v0 = numpy.random.rand(3, 10) - >>> M = superimposition_matrix(v0, v0) - >>> numpy.allclose(M, numpy.identity(4)) - True - >>> R = random_rotation_matrix(numpy.random.random(3)) - >>> v0 = ((1,0,0), (0,1,0), (0,0,1), (1,1,1)) - >>> v1 = numpy.dot(R, v0) - >>> M = superimposition_matrix(v0, v1) - >>> numpy.allclose(v1, numpy.dot(M, v0)) - True - >>> v0 = (numpy.random.rand(4, 100) - 0.5) * 20.0 - >>> v0[3] = 1.0 - >>> v1 = numpy.dot(R, v0) - >>> M = superimposition_matrix(v0, v1) - >>> numpy.allclose(v1, numpy.dot(M, v0)) - True - >>> S = scale_matrix(random.random()) - >>> T = translation_matrix(numpy.random.random(3)-0.5) - >>> M = concatenate_matrices(T, R, S) - >>> v1 = numpy.dot(M, v0) - >>> v0[:3] += numpy.random.normal(0.0, 1e-9, 300).reshape(3, -1) - >>> M = superimposition_matrix(v0, v1, scaling=True) - >>> numpy.allclose(v1, numpy.dot(M, v0)) - True - >>> M = superimposition_matrix(v0, v1, scaling=True, usesvd=False) - >>> numpy.allclose(v1, numpy.dot(M, v0)) - True - >>> v = numpy.empty((4, 100, 3), dtype=numpy.float64) - >>> v[:, :, 0] = v0 - >>> M = superimposition_matrix(v0, v1, scaling=True, usesvd=False) - >>> numpy.allclose(v1, numpy.dot(M, v[:, :, 0])) - True - - """ - v0 = numpy.array(v0, dtype=numpy.float64, copy=False)[:3] - v1 = numpy.array(v1, dtype=numpy.float64, copy=False)[:3] - - if v0.shape != v1.shape or v0.shape[1] < 3: - raise ValueError("Vector sets are of wrong shape or type.") - - # move centroids to origin - t0 = numpy.mean(v0, axis=1) - t1 = numpy.mean(v1, axis=1) - v0 = v0 - t0.reshape(3, 1) - v1 = v1 - t1.reshape(3, 1) - - if usesvd: - # Singular Value Decomposition of covariance matrix - u, s, vh = numpy.linalg.svd(numpy.dot(v1, v0.T)) - # rotation matrix from SVD orthonormal bases - R = numpy.dot(u, vh) - if numpy.linalg.det(R) < 0.0: - # R does not constitute right handed system - R -= numpy.outer(u[:, 2], vh[2, :]*2.0) - s[-1] *= -1.0 - # homogeneous transformation matrix - M = numpy.identity(4) - M[:3, :3] = R - else: - # compute symmetric matrix N - xx, yy, zz = numpy.sum(v0 * v1, axis=1) - xy, yz, zx = numpy.sum(v0 * numpy.roll(v1, -1, axis=0), axis=1) - xz, yx, zy = numpy.sum(v0 * numpy.roll(v1, -2, axis=0), axis=1) - N = ((xx+yy+zz, yz-zy, zx-xz, xy-yx), - (yz-zy, xx-yy-zz, xy+yx, zx+xz), - (zx-xz, xy+yx, -xx+yy-zz, yz+zy), - (xy-yx, zx+xz, yz+zy, -xx-yy+zz)) - # quaternion: eigenvector corresponding to most positive eigenvalue - l, V = numpy.linalg.eig(N) - q = V[:, numpy.argmax(l)] - q /= vector_norm(q) # unit quaternion - q = numpy.roll(q, -1) # move w component to end - # homogeneous transformation matrix - M = quaternion_matrix(q) - - # scale: ratio of rms deviations from centroid - if scaling: - v0 *= v0 - v1 *= v1 - M[:3, :3] *= math.sqrt(numpy.sum(v1) / numpy.sum(v0)) - - # translation - M[:3, 3] = t1 - T = numpy.identity(4) - T[:3, 3] = -t0 - M = numpy.dot(M, T) - return M - - -def euler_matrix(ai, aj, ak, axes='sxyz'): - """Return homogeneous rotation matrix from Euler angles and axis sequence. - - ai, aj, ak : Euler's roll, pitch and yaw angles - axes : One of 24 axis sequences as string or encoded tuple - - >>> R = euler_matrix(1, 2, 3, 'syxz') - >>> numpy.allclose(numpy.sum(R[0]), -1.34786452) - True - >>> R = euler_matrix(1, 2, 3, (0, 1, 0, 1)) - >>> numpy.allclose(numpy.sum(R[0]), -0.383436184) - True - >>> ai, aj, ak = (4.0*math.pi) * (numpy.random.random(3) - 0.5) - >>> for axes in _AXES2TUPLE.keys(): - ... R = euler_matrix(ai, aj, ak, axes) - >>> for axes in _TUPLE2AXES.keys(): - ... 
R = euler_matrix(ai, aj, ak, axes) - - """ - try: - firstaxis, parity, repetition, frame = _AXES2TUPLE[axes] - except (AttributeError, KeyError): - _ = _TUPLE2AXES[axes] - firstaxis, parity, repetition, frame = axes - - i = firstaxis - j = _NEXT_AXIS[i+parity] - k = _NEXT_AXIS[i-parity+1] - - if frame: - ai, ak = ak, ai - if parity: - ai, aj, ak = -ai, -aj, -ak - - si, sj, sk = math.sin(ai), math.sin(aj), math.sin(ak) - ci, cj, ck = math.cos(ai), math.cos(aj), math.cos(ak) - cc, cs = ci*ck, ci*sk - sc, ss = si*ck, si*sk - - M = numpy.identity(4) - if repetition: - M[i, i] = cj - M[i, j] = sj*si - M[i, k] = sj*ci - M[j, i] = sj*sk - M[j, j] = -cj*ss+cc - M[j, k] = -cj*cs-sc - M[k, i] = -sj*ck - M[k, j] = cj*sc+cs - M[k, k] = cj*cc-ss - else: - M[i, i] = cj*ck - M[i, j] = sj*sc-cs - M[i, k] = sj*cc+ss - M[j, i] = cj*sk - M[j, j] = sj*ss+cc - M[j, k] = sj*cs-sc - M[k, i] = -sj - M[k, j] = cj*si - M[k, k] = cj*ci - return M - - -def euler_from_matrix(matrix, axes='sxyz'): - """Return Euler angles from rotation matrix for specified axis sequence. - - axes : One of 24 axis sequences as string or encoded tuple - - Note that many Euler angle triplets can describe one matrix. - - >>> R0 = euler_matrix(1, 2, 3, 'syxz') - >>> al, be, ga = euler_from_matrix(R0, 'syxz') - >>> R1 = euler_matrix(al, be, ga, 'syxz') - >>> numpy.allclose(R0, R1) - True - >>> angles = (4.0*math.pi) * (numpy.random.random(3) - 0.5) - >>> for axes in _AXES2TUPLE.keys(): - ... R0 = euler_matrix(axes=axes, *angles) - ... R1 = euler_matrix(axes=axes, *euler_from_matrix(R0, axes)) - ... if not numpy.allclose(R0, R1): print axes, "failed" - - """ - try: - firstaxis, parity, repetition, frame = _AXES2TUPLE[axes.lower()] - except (AttributeError, KeyError): - _ = _TUPLE2AXES[axes] - firstaxis, parity, repetition, frame = axes - - i = firstaxis - j = _NEXT_AXIS[i+parity] - k = _NEXT_AXIS[i-parity+1] - - M = numpy.array(matrix, dtype=numpy.float64, copy=False)[:3, :3] - if repetition: - sy = math.sqrt(M[i, j]*M[i, j] + M[i, k]*M[i, k]) - if sy > _EPS: - ax = math.atan2( M[i, j], M[i, k]) - ay = math.atan2( sy, M[i, i]) - az = math.atan2( M[j, i], -M[k, i]) - else: - ax = math.atan2(-M[j, k], M[j, j]) - ay = math.atan2( sy, M[i, i]) - az = 0.0 - else: - cy = math.sqrt(M[i, i]*M[i, i] + M[j, i]*M[j, i]) - if cy > _EPS: - ax = math.atan2( M[k, j], M[k, k]) - ay = math.atan2(-M[k, i], cy) - az = math.atan2( M[j, i], M[i, i]) - else: - ax = math.atan2(-M[j, k], M[j, j]) - ay = math.atan2(-M[k, i], cy) - az = 0.0 - - if parity: - ax, ay, az = -ax, -ay, -az - if frame: - ax, az = az, ax - return ax, ay, az - - -def euler_from_quaternion(quaternion, axes='sxyz'): - """Return Euler angles from quaternion for specified axis sequence. - - >>> angles = euler_from_quaternion([0.06146124, 0, 0, 0.99810947]) - >>> numpy.allclose(angles, [0.123, 0, 0]) - True - - """ - return euler_from_matrix(quaternion_matrix(quaternion), axes) - - -def quaternion_from_euler(ai, aj, ak, axes='sxyz'): - """Return quaternion from Euler angles and axis sequence. 
- - ai, aj, ak : Euler's roll, pitch and yaw angles - axes : One of 24 axis sequences as string or encoded tuple - - >>> q = quaternion_from_euler(1, 2, 3, 'ryxz') - >>> numpy.allclose(q, [0.310622, -0.718287, 0.444435, 0.435953]) - True - - """ - try: - firstaxis, parity, repetition, frame = _AXES2TUPLE[axes.lower()] - except (AttributeError, KeyError): - _ = _TUPLE2AXES[axes] - firstaxis, parity, repetition, frame = axes - - i = firstaxis - j = _NEXT_AXIS[i+parity] - k = _NEXT_AXIS[i-parity+1] - - if frame: - ai, ak = ak, ai - if parity: - aj = -aj - - ai /= 2.0 - aj /= 2.0 - ak /= 2.0 - ci = math.cos(ai) - si = math.sin(ai) - cj = math.cos(aj) - sj = math.sin(aj) - ck = math.cos(ak) - sk = math.sin(ak) - cc = ci*ck - cs = ci*sk - sc = si*ck - ss = si*sk - - quaternion = numpy.empty((4, ), dtype=numpy.float64) - if repetition: - quaternion[i] = cj*(cs + sc) - quaternion[j] = sj*(cc + ss) - quaternion[k] = sj*(cs - sc) - quaternion[3] = cj*(cc - ss) - else: - quaternion[i] = cj*sc - sj*cs - quaternion[j] = cj*ss + sj*cc - quaternion[k] = cj*cs - sj*sc - quaternion[3] = cj*cc + sj*ss - if parity: - quaternion[j] *= -1 - - return quaternion - - -def quaternion_about_axis(angle, axis): - """Return quaternion for rotation about axis. - - >>> q = quaternion_about_axis(0.123, (1, 0, 0)) - >>> numpy.allclose(q, [0.06146124, 0, 0, 0.99810947]) - True - - """ - quaternion = numpy.zeros((4, ), dtype=numpy.float64) - quaternion[:3] = axis[:3] - qlen = vector_norm(quaternion) - if qlen > _EPS: - quaternion *= math.sin(angle/2.0) / qlen - quaternion[3] = math.cos(angle/2.0) - return quaternion - - -def quaternion_matrix(quaternion): - """Return homogeneous rotation matrix from quaternion. - - >>> R = quaternion_matrix([0.06146124, 0, 0, 0.99810947]) - >>> numpy.allclose(R, rotation_matrix(0.123, (1, 0, 0))) - True - - """ - q = numpy.array(quaternion[:4], dtype=numpy.float64, copy=True) - nq = numpy.dot(q, q) - if nq < _EPS: - return numpy.identity(4) - q *= math.sqrt(2.0 / nq) - q = numpy.outer(q, q) - return numpy.array(( - (1.0-q[1, 1]-q[2, 2], q[0, 1]-q[2, 3], q[0, 2]+q[1, 3], 0.0), - ( q[0, 1]+q[2, 3], 1.0-q[0, 0]-q[2, 2], q[1, 2]-q[0, 3], 0.0), - ( q[0, 2]-q[1, 3], q[1, 2]+q[0, 3], 1.0-q[0, 0]-q[1, 1], 0.0), - ( 0.0, 0.0, 0.0, 1.0) - ), dtype=numpy.float64) - - -def quaternion_from_matrix(matrix): - """Return quaternion from rotation matrix. - - >>> R = rotation_matrix(0.123, (1, 2, 3)) - >>> q = quaternion_from_matrix(R) - >>> numpy.allclose(q, [0.0164262, 0.0328524, 0.0492786, 0.9981095]) - True - - """ - q = numpy.empty((4, ), dtype=numpy.float64) - M = numpy.array(matrix, dtype=numpy.float64, copy=False)[:4, :4] - t = numpy.trace(M) - if t > M[3, 3]: - q[3] = t - q[2] = M[1, 0] - M[0, 1] - q[1] = M[0, 2] - M[2, 0] - q[0] = M[2, 1] - M[1, 2] - else: - i, j, k = 0, 1, 2 - if M[1, 1] > M[0, 0]: - i, j, k = 1, 2, 0 - if M[2, 2] > M[i, i]: - i, j, k = 2, 0, 1 - t = M[i, i] - (M[j, j] + M[k, k]) + M[3, 3] - q[i] = t - q[j] = M[i, j] + M[j, i] - q[k] = M[k, i] + M[i, k] - q[3] = M[k, j] - M[j, k] - q *= 0.5 / math.sqrt(t * M[3, 3]) - return q - - -def quaternion_multiply(quaternion1, quaternion0): - """Return multiplication of two quaternions. 
- - >>> q = quaternion_multiply([1, -2, 3, 4], [-5, 6, 7, 8]) - >>> numpy.allclose(q, [-44, -14, 48, 28]) - True - - """ - x0, y0, z0, w0 = quaternion0 - x1, y1, z1, w1 = quaternion1 - return numpy.array(( - x1*w0 + y1*z0 - z1*y0 + w1*x0, - -x1*z0 + y1*w0 + z1*x0 + w1*y0, - x1*y0 - y1*x0 + z1*w0 + w1*z0, - -x1*x0 - y1*y0 - z1*z0 + w1*w0), dtype=numpy.float64) - - -def quaternion_conjugate(quaternion): - """Return conjugate of quaternion. - - >>> q0 = random_quaternion() - >>> q1 = quaternion_conjugate(q0) - >>> q1[3] == q0[3] and all(q1[:3] == -q0[:3]) - True - - """ - return numpy.array((-quaternion[0], -quaternion[1], - -quaternion[2], quaternion[3]), dtype=numpy.float64) - - -def quaternion_inverse(quaternion): - """Return inverse of quaternion. - - >>> q0 = random_quaternion() - >>> q1 = quaternion_inverse(q0) - >>> numpy.allclose(quaternion_multiply(q0, q1), [0, 0, 0, 1]) - True - - """ - return quaternion_conjugate(quaternion) / numpy.dot(quaternion, quaternion) - - -def quaternion_slerp(quat0, quat1, fraction, spin=0, shortestpath=True): - """Return spherical linear interpolation between two quaternions. - - >>> q0 = random_quaternion() - >>> q1 = random_quaternion() - >>> q = quaternion_slerp(q0, q1, 0.0) - >>> numpy.allclose(q, q0) - True - >>> q = quaternion_slerp(q0, q1, 1.0, 1) - >>> numpy.allclose(q, q1) - True - >>> q = quaternion_slerp(q0, q1, 0.5) - >>> angle = math.acos(numpy.dot(q0, q)) - >>> numpy.allclose(2.0, math.acos(numpy.dot(q0, q1)) / angle) or \ - numpy.allclose(2.0, math.acos(-numpy.dot(q0, q1)) / angle) - True - - """ - q0 = unit_vector(quat0[:4]) - q1 = unit_vector(quat1[:4]) - if fraction == 0.0: - return q0 - elif fraction == 1.0: - return q1 - d = numpy.dot(q0, q1) - if abs(abs(d) - 1.0) < _EPS: - return q0 - if shortestpath and d < 0.0: - # invert rotation - d = -d - q1 *= -1.0 - angle = math.acos(d) + spin * math.pi - if abs(angle) < _EPS: - return q0 - isin = 1.0 / math.sin(angle) - q0 *= math.sin((1.0 - fraction) * angle) * isin - q1 *= math.sin(fraction * angle) * isin - q0 += q1 - return q0 - - -def random_quaternion(rand=None): - """Return uniform random unit quaternion. - - rand: array like or None - Three independent random variables that are uniformly distributed - between 0 and 1. - - >>> q = random_quaternion() - >>> numpy.allclose(1.0, vector_norm(q)) - True - >>> q = random_quaternion(numpy.random.random(3)) - >>> q.shape - (4,) - - """ - if rand is None: - rand = numpy.random.rand(3) - else: - assert len(rand) == 3 - r1 = numpy.sqrt(1.0 - rand[0]) - r2 = numpy.sqrt(rand[0]) - pi2 = math.pi * 2.0 - t1 = pi2 * rand[1] - t2 = pi2 * rand[2] - return numpy.array((numpy.sin(t1)*r1, - numpy.cos(t1)*r1, - numpy.sin(t2)*r2, - numpy.cos(t2)*r2), dtype=numpy.float64) - - -def random_rotation_matrix(rand=None): - """Return uniform random rotation matrix. - - rnd: array like - Three independent random variables that are uniformly distributed - between 0 and 1 for each returned quaternion. - - >>> R = random_rotation_matrix() - >>> numpy.allclose(numpy.dot(R.T, R), numpy.identity(4)) - True - - """ - return quaternion_matrix(random_quaternion(rand)) - - -class Arcball(object): - """Virtual Trackball Control. 
- - >>> ball = Arcball() - >>> ball = Arcball(initial=numpy.identity(4)) - >>> ball.place([320, 320], 320) - >>> ball.down([500, 250]) - >>> ball.drag([475, 275]) - >>> R = ball.matrix() - >>> numpy.allclose(numpy.sum(R), 3.90583455) - True - >>> ball = Arcball(initial=[0, 0, 0, 1]) - >>> ball.place([320, 320], 320) - >>> ball.setaxes([1,1,0], [-1, 1, 0]) - >>> ball.setconstrain(True) - >>> ball.down([400, 200]) - >>> ball.drag([200, 400]) - >>> R = ball.matrix() - >>> numpy.allclose(numpy.sum(R), 0.2055924) - True - >>> ball.next() - - """ - - def __init__(self, initial=None): - """Initialize virtual trackball control. - - initial : quaternion or rotation matrix - - """ - self._axis = None - self._axes = None - self._radius = 1.0 - self._center = [0.0, 0.0] - self._vdown = numpy.array([0, 0, 1], dtype=numpy.float64) - self._constrain = False - - if initial is None: - self._qdown = numpy.array([0, 0, 0, 1], dtype=numpy.float64) - else: - initial = numpy.array(initial, dtype=numpy.float64) - if initial.shape == (4, 4): - self._qdown = quaternion_from_matrix(initial) - elif initial.shape == (4, ): - initial /= vector_norm(initial) - self._qdown = initial - else: - raise ValueError("initial not a quaternion or matrix.") - - self._qnow = self._qpre = self._qdown - - def place(self, center, radius): - """Place Arcball, e.g. when window size changes. - - center : sequence[2] - Window coordinates of trackball center. - radius : float - Radius of trackball in window coordinates. - - """ - self._radius = float(radius) - self._center[0] = center[0] - self._center[1] = center[1] - - def setaxes(self, *axes): - """Set axes to constrain rotations.""" - if axes is None: - self._axes = None - else: - self._axes = [unit_vector(axis) for axis in axes] - - def setconstrain(self, constrain): - """Set state of constrain to axis mode.""" - self._constrain = constrain == True - - def getconstrain(self): - """Return state of constrain to axis mode.""" - return self._constrain - - def down(self, point): - """Set initial cursor window coordinates and pick constrain-axis.""" - self._vdown = arcball_map_to_sphere(point, self._center, self._radius) - self._qdown = self._qpre = self._qnow - - if self._constrain and self._axes is not None: - self._axis = arcball_nearest_axis(self._vdown, self._axes) - self._vdown = arcball_constrain_to_axis(self._vdown, self._axis) - else: - self._axis = None - - def drag(self, point): - """Update current cursor window coordinates.""" - vnow = arcball_map_to_sphere(point, self._center, self._radius) - - if self._axis is not None: - vnow = arcball_constrain_to_axis(vnow, self._axis) - - self._qpre = self._qnow - - t = numpy.cross(self._vdown, vnow) - if numpy.dot(t, t) < _EPS: - self._qnow = self._qdown - else: - q = [t[0], t[1], t[2], numpy.dot(self._vdown, vnow)] - self._qnow = quaternion_multiply(q, self._qdown) - - def next(self, acceleration=0.0): - """Continue rotation in direction of last drag.""" - q = quaternion_slerp(self._qpre, self._qnow, 2.0+acceleration, False) - self._qpre, self._qnow = self._qnow, q - - def matrix(self): - """Return homogeneous rotation matrix.""" - return quaternion_matrix(self._qnow) - - -def arcball_map_to_sphere(point, center, radius): - """Return unit sphere coordinates from window coordinates.""" - v = numpy.array(((point[0] - center[0]) / radius, - (center[1] - point[1]) / radius, - 0.0), dtype=numpy.float64) - n = v[0]*v[0] + v[1]*v[1] - if n > 1.0: - v /= math.sqrt(n) # position outside of sphere - else: - v[2] = math.sqrt(1.0 - n) - return v - 
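The helpers deleted in this hunk (shear_from_matrix through the Arcball trackball above) come from Christoph Gohlke's transformations.py, the same module ROS 1 ships as tf.transformations, so callers can switch to the stock import rather than vendoring a copy. A minimal migration sketch, assuming a ROS 1 environment with the tf package available; the angle values are arbitrary:

import numpy
from tf.transformations import (
    euler_from_quaternion,
    quaternion_from_euler,
    quaternion_multiply,
)

# roll, pitch, yaw -> quaternion in [x, y, z, w] order, as in the code above
q = quaternion_from_euler(0.1, 0.2, 0.3)

# multiplying by the identity quaternion leaves q unchanged
assert numpy.allclose(quaternion_multiply([0, 0, 0, 1], q), q)

# round-trip back to static-xyz Euler angles
assert numpy.allclose(euler_from_quaternion(q), [0.1, 0.2, 0.3])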
- -def arcball_constrain_to_axis(point, axis): - """Return sphere point perpendicular to axis.""" - v = numpy.array(point, dtype=numpy.float64, copy=True) - a = numpy.array(axis, dtype=numpy.float64, copy=True) - v -= a * numpy.dot(a, v) # on plane - n = vector_norm(v) - if n > _EPS: - if v[2] < 0.0: - v *= -1.0 - v /= n - return v - if a[2] == 1.0: - return numpy.array([1, 0, 0], dtype=numpy.float64) - return unit_vector([-a[1], a[0], 0]) - - -def arcball_nearest_axis(point, axes): - """Return axis, which arc is nearest to point.""" - point = numpy.array(point, dtype=numpy.float64, copy=False) - nearest = None - mx = -1.0 - for axis in axes: - t = numpy.dot(arcball_constrain_to_axis(point, axis), point) - if t > mx: - nearest = axis - mx = t - return nearest - - -# epsilon for testing whether a number is close to zero -_EPS = numpy.finfo(float).eps * 4.0 - -# axis sequences for Euler angles -_NEXT_AXIS = [1, 2, 0, 1] - -# map axes strings to/from tuples of inner axis, parity, repetition, frame -_AXES2TUPLE = { - 'sxyz': (0, 0, 0, 0), 'sxyx': (0, 0, 1, 0), 'sxzy': (0, 1, 0, 0), - 'sxzx': (0, 1, 1, 0), 'syzx': (1, 0, 0, 0), 'syzy': (1, 0, 1, 0), - 'syxz': (1, 1, 0, 0), 'syxy': (1, 1, 1, 0), 'szxy': (2, 0, 0, 0), - 'szxz': (2, 0, 1, 0), 'szyx': (2, 1, 0, 0), 'szyz': (2, 1, 1, 0), - 'rzyx': (0, 0, 0, 1), 'rxyx': (0, 0, 1, 1), 'ryzx': (0, 1, 0, 1), - 'rxzx': (0, 1, 1, 1), 'rxzy': (1, 0, 0, 1), 'ryzy': (1, 0, 1, 1), - 'rzxy': (1, 1, 0, 1), 'ryxy': (1, 1, 1, 1), 'ryxz': (2, 0, 0, 1), - 'rzxz': (2, 0, 1, 1), 'rxyz': (2, 1, 0, 1), 'rzyz': (2, 1, 1, 1)} - -_TUPLE2AXES = dict((v, k) for k, v in _AXES2TUPLE.items()) - -# helper functions - -def vector_norm(data, axis=None, out=None): - """Return length, i.e. eucledian norm, of ndarray along axis. - - >>> v = numpy.random.random(3) - >>> n = vector_norm(v) - >>> numpy.allclose(n, numpy.linalg.norm(v)) - True - >>> v = numpy.random.rand(6, 5, 3) - >>> n = vector_norm(v, axis=-1) - >>> numpy.allclose(n, numpy.sqrt(numpy.sum(v*v, axis=2))) - True - >>> n = vector_norm(v, axis=1) - >>> numpy.allclose(n, numpy.sqrt(numpy.sum(v*v, axis=1))) - True - >>> v = numpy.random.rand(5, 4, 3) - >>> n = numpy.empty((5, 3), dtype=numpy.float64) - >>> vector_norm(v, axis=1, out=n) - >>> numpy.allclose(n, numpy.sqrt(numpy.sum(v*v, axis=1))) - True - >>> vector_norm([]) - 0.0 - >>> vector_norm([1.0]) - 1.0 - - """ - data = numpy.array(data, dtype=numpy.float64, copy=True) - if out is None: - if data.ndim == 1: - return math.sqrt(numpy.dot(data, data)) - data *= data - out = numpy.atleast_1d(numpy.sum(data, axis=axis)) - numpy.sqrt(out, out) - return out - else: - data *= data - numpy.sum(data, axis=axis, out=out) - numpy.sqrt(out, out) - - -def unit_vector(data, axis=None, out=None): - """Return ndarray normalized by length, i.e. eucledian norm, along axis. 
- - >>> v0 = numpy.random.random(3) - >>> v1 = unit_vector(v0) - >>> numpy.allclose(v1, v0 / numpy.linalg.norm(v0)) - True - >>> v0 = numpy.random.rand(5, 4, 3) - >>> v1 = unit_vector(v0, axis=-1) - >>> v2 = v0 / numpy.expand_dims(numpy.sqrt(numpy.sum(v0*v0, axis=2)), 2) - >>> numpy.allclose(v1, v2) - True - >>> v1 = unit_vector(v0, axis=1) - >>> v2 = v0 / numpy.expand_dims(numpy.sqrt(numpy.sum(v0*v0, axis=1)), 1) - >>> numpy.allclose(v1, v2) - True - >>> v1 = numpy.empty((5, 4, 3), dtype=numpy.float64) - >>> unit_vector(v0, axis=1, out=v1) - >>> numpy.allclose(v1, v2) - True - >>> list(unit_vector([])) - [] - >>> list(unit_vector([1.0])) - [1.0] - - """ - if out is None: - data = numpy.array(data, dtype=numpy.float64, copy=True) - if data.ndim == 1: - data /= math.sqrt(numpy.dot(data, data)) - return data - else: - if out is not data: - out[:] = numpy.array(data, copy=False) - data = out - length = numpy.atleast_1d(numpy.sum(data*data, axis)) - numpy.sqrt(length, length) - if axis is not None: - length = numpy.expand_dims(length, axis) - data /= length - if out is None: - return data - - -def random_vector(size): - """Return array of random doubles in the half-open interval [0.0, 1.0). - - >>> v = random_vector(10000) - >>> numpy.all(v >= 0.0) and numpy.all(v < 1.0) - True - >>> v0 = random_vector(10) - >>> v1 = random_vector(10) - >>> numpy.any(v0 == v1) - False - - """ - return numpy.random.random(size) - - -def inverse_matrix(matrix): - """Return inverse of square transformation matrix. - - >>> M0 = random_rotation_matrix() - >>> M1 = inverse_matrix(M0.T) - >>> numpy.allclose(M1, numpy.linalg.inv(M0.T)) - True - >>> for size in range(1, 7): - ... M0 = numpy.random.rand(size, size) - ... M1 = inverse_matrix(M0) - ... if not numpy.allclose(M1, numpy.linalg.inv(M0)): print size - - """ - return numpy.linalg.inv(matrix) - - -def concatenate_matrices(*matrices): - """Return concatenation of series of transformation matrices. - - >>> M = numpy.random.rand(16).reshape((4, 4)) - 0.5 - >>> numpy.allclose(M, concatenate_matrices(M)) - True - >>> numpy.allclose(numpy.dot(M, M.T), concatenate_matrices(M, M.T)) - True - - """ - M = numpy.identity(4) - for i in matrices: - M = numpy.dot(M, i) - return M - - -def is_same_transform(matrix0, matrix1): - """Return True if two matrices perform same transformation. - - >>> is_same_transform(numpy.identity(4), numpy.identity(4)) - True - >>> is_same_transform(numpy.identity(4), random_rotation_matrix()) - False - - """ - matrix0 = numpy.array(matrix0, dtype=numpy.float64, copy=True) - matrix0 /= matrix0[3, 3] - matrix1 = numpy.array(matrix1, dtype=numpy.float64, copy=True) - matrix1 /= matrix1[3, 3] - return numpy.allclose(matrix0, matrix1) - - -def _import_module(module_name, warn=True, prefix='_py_', ignore='_'): - """Try import all public attributes from module into global namespace. - - Existing attributes with name clashes are renamed with prefix. - Attributes starting with underscore are ignored by default. - - Return True on successful import. 
- - """ - try: - module = __import__(module_name) - except ImportError: - if warn: - warnings.warn("Failed to import module " + module_name) - else: - for attr in dir(module): - if ignore and attr.startswith(ignore): - continue - if prefix: - if attr in globals(): - globals()[prefix + attr] = globals()[attr] - elif warn: - warnings.warn("No Python implementation of " + attr) - globals()[attr] = getattr(module, attr) - return True \ No newline at end of file diff --git a/common/helpers/tf_module/srv/ApplyTransform.srv b/common/helpers/tf_module/srv/ApplyTransform.srv deleted file mode 100644 index 2c2299770..000000000 --- a/common/helpers/tf_module/srv/ApplyTransform.srv +++ /dev/null @@ -1,4 +0,0 @@ -geometry_msgs/Point[] points # point we want to transform -geometry_msgs/TransformStamped transform #the transform we want to use ---- -geometry_msgs/Point[] new_points # the transformed point \ No newline at end of file diff --git a/common/helpers/tf_module/srv/BaseTransform.srv b/common/helpers/tf_module/srv/BaseTransform.srv deleted file mode 100644 index 4ed61b6b5..000000000 --- a/common/helpers/tf_module/srv/BaseTransform.srv +++ /dev/null @@ -1,5 +0,0 @@ -geometry_msgs/Point[] points # point we want to transform -std_msgs/String frame # the frame of the point -std_msgs/String target_frame # the frame we want to transform the point to ---- -geometry_msgs/PointStamped[] new_points # the transformed point diff --git a/common/helpers/tf_module/srv/LatestTransform.srv b/common/helpers/tf_module/srv/LatestTransform.srv deleted file mode 100644 index e41175e19..000000000 --- a/common/helpers/tf_module/srv/LatestTransform.srv +++ /dev/null @@ -1,4 +0,0 @@ -string from_frame # source frame -string target_frame # target frame ---- -geometry_msgs/TransformStamped transform # transform \ No newline at end of file diff --git a/common/helpers/tf_module/srv/TfTransform.srv b/common/helpers/tf_module/srv/TfTransform.srv deleted file mode 100644 index 35b0e70fc..000000000 --- a/common/helpers/tf_module/srv/TfTransform.srv +++ /dev/null @@ -1,10 +0,0 @@ -geometry_msgs/PoseArray pose_array -sensor_msgs/PointCloud2 pointcloud -geometry_msgs/PointStamped point -std_msgs/String target_frame -std_msgs/String source_frame ---- -geometry_msgs/PoseArray target_pose_array -sensor_msgs/PointCloud2 target_pointcloud -geometry_msgs/PointStamped target_point -geometry_msgs/Pose translation_and_rotation \ No newline at end of file diff --git a/common/speech/lasr_rasa/package.xml b/common/speech/lasr_rasa/package.xml index f909b5640..29394262f 100644 --- a/common/speech/lasr_rasa/package.xml +++ b/common/speech/lasr_rasa/package.xml @@ -7,7 +7,7 @@ - Jared + Jared Swift diff --git a/common/speech/lasr_rasa/scripts/rasa.sh b/common/speech/lasr_rasa/scripts/rasa.sh deleted file mode 100755 index a8b7aeacd..000000000 --- a/common/speech/lasr_rasa/scripts/rasa.sh +++ /dev/null @@ -1,2 +0,0 @@ -#!/bin/bash -rosrun lasr_rasa rasa run --enable-api -p $1 -m $2 \ No newline at end of file diff --git a/common/speech/lasr_speech/CMakeLists.txt b/common/speech/lasr_speech/CMakeLists.txt deleted file mode 100644 index 4a07fded5..000000000 --- a/common/speech/lasr_speech/CMakeLists.txt +++ /dev/null @@ -1,208 +0,0 @@ -cmake_minimum_required(VERSION 3.0.2) -project(lasr_speech) - -## Compile as C++11, supported in ROS Kinetic and newer -# add_compile_options(-std=c++11) - -## Find catkin macros and libraries -## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz) -## is used, also find other catkin packages 
-find_package(catkin REQUIRED COMPONENTS - rospy - std_msgs - message_generation - lasr_rasa - lasr_speech_recognition_msgs - lasr_speech_recognition_whisper -) - -## System dependencies are found with CMake's conventions -# find_package(Boost REQUIRED COMPONENTS system) - - -## Uncomment this if the package has a setup.py. This macro ensures -## modules and global scripts declared therein get installed -## See http://ros.org/doc/api/catkin/html/user_guide/setup_dot_py.html -# catkin_python_setup() - -################################################ -## Declare ROS messages, services and actions ## -################################################ - -## To declare and build messages, services or actions from within this -## package, follow these steps: -## * Let MSG_DEP_SET be the set of packages whose message types you use in -## your messages/services/actions (e.g. std_msgs, actionlib_msgs, ...). -## * In the file package.xml: -## * add a build_depend tag for "message_generation" -## * add a build_depend and a exec_depend tag for each package in MSG_DEP_SET -## * If MSG_DEP_SET isn't empty the following dependency has been pulled in -## but can be declared for certainty nonetheless: -## * add a exec_depend tag for "message_runtime" -## * In this file (CMakeLists.txt): -## * add "message_generation" and every package in MSG_DEP_SET to -## find_package(catkin REQUIRED COMPONENTS ...) -## * add "message_runtime" and every package in MSG_DEP_SET to -## catkin_package(CATKIN_DEPENDS ...) -## * uncomment the add_*_files sections below as needed -## and list every .msg/.srv/.action file to be processed -## * uncomment the generate_messages entry below -## * add every package in MSG_DEP_SET to generate_messages(DEPENDENCIES ...) - -## Generate messages in the 'msg' folder -# add_message_files( -# FILES -# Message1.msg -# Message2.msg -# ) - -## Generate services in the 'srv' folder -add_service_files( - FILES - Speech.srv -) - -## Generate actions in the 'action' folder -# add_action_files( -# FILES -# Action1.action -# Action2.action -# ) - -## Generate added messages and services with any dependencies listed here -generate_messages( - DEPENDENCIES - std_msgs -) - -################################################ -## Declare ROS dynamic reconfigure parameters ## -################################################ - -## To declare and build dynamic reconfigure parameters within this -## package, follow these steps: -## * In the file package.xml: -## * add a build_depend and a exec_depend tag for "dynamic_reconfigure" -## * In this file (CMakeLists.txt): -## * add "dynamic_reconfigure" to -## find_package(catkin REQUIRED COMPONENTS ...) 
-## * uncomment the "generate_dynamic_reconfigure_options" section below -## and list every .cfg file to be processed - -## Generate dynamic reconfigure parameters in the 'cfg' folder -# generate_dynamic_reconfigure_options( -# cfg/DynReconf1.cfg -# cfg/DynReconf2.cfg -# ) - -################################### -## catkin specific configuration ## -################################### -## The catkin_package macro generates cmake config files for your package -## Declare things to be passed to dependent projects -## INCLUDE_DIRS: uncomment this if your package contains header files -## LIBRARIES: libraries you create in this project that dependent projects also need -## CATKIN_DEPENDS: catkin_packages dependent projects also need -## DEPENDS: system dependencies of this project that dependent projects also need -catkin_package( -# INCLUDE_DIRS include -# LIBRARIES lasr_speech -# CATKIN_DEPENDS lasr_rasa lasr_speech_recognition_msgs lasr_speech_recognition_whisper rospy -# DEPENDS system_lib -) - -########### -## Build ## -########### - -## Specify additional locations of header files -## Your package locations should be listed before other locations -include_directories( -# include - ${catkin_INCLUDE_DIRS} -) - -## Declare a C++ library -# add_library(${PROJECT_NAME} -# src/${PROJECT_NAME}/lasr_speech.cpp -# ) - -## Add cmake target dependencies of the library -## as an example, code may need to be generated before libraries -## either from message generation or dynamic reconfigure -# add_dependencies(${PROJECT_NAME} ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS}) - -## Declare a C++ executable -## With catkin_make all packages are built within a single CMake context -## The recommended prefix ensures that target names across packages don't collide -# add_executable(${PROJECT_NAME}_node src/lasr_speech_node.cpp) - -## Rename C++ executable without prefix -## The above recommended prefix causes long target names, the following renames the -## target back to the shorter version for ease of user use -## e.g. "rosrun someones_pkg node" instead of "rosrun someones_pkg someones_pkg_node" -# set_target_properties(${PROJECT_NAME}_node PROPERTIES OUTPUT_NAME node PREFIX "") - -## Add cmake target dependencies of the executable -## same as for the library above -# add_dependencies(${PROJECT_NAME}_node ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS}) - -## Specify libraries to link a library or executable target against -# target_link_libraries(${PROJECT_NAME}_node -# ${catkin_LIBRARIES} -# ) - -############# -## Install ## -############# - -# all install targets should use catkin DESTINATION variables -# See http://ros.org/doc/api/catkin/html/adv_user_guide/variables.html - -## Mark executable scripts (Python etc.) 
for installation -## in contrast to setup.py, you can choose the destination -# catkin_install_python(PROGRAMS -# scripts/my_python_script -# DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} -# ) - -## Mark executables for installation -## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_executables.html -# install(TARGETS ${PROJECT_NAME}_node -# RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} -# ) - -## Mark libraries for installation -## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_libraries.html -# install(TARGETS ${PROJECT_NAME} -# ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} -# LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} -# RUNTIME DESTINATION ${CATKIN_GLOBAL_BIN_DESTINATION} -# ) - -## Mark cpp header files for installation -# install(DIRECTORY include/${PROJECT_NAME}/ -# DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION} -# FILES_MATCHING PATTERN "*.h" -# PATTERN ".svn" EXCLUDE -# ) - -## Mark other files for installation (e.g. launch and bag files, etc.) -# install(FILES -# # myfile1 -# # myfile2 -# DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION} -# ) - -############# -## Testing ## -############# - -## Add gtest based cpp test target and link libraries -# catkin_add_gtest(${PROJECT_NAME}-test test/test_lasr_speech.cpp) -# if(TARGET ${PROJECT_NAME}-test) -# target_link_libraries(${PROJECT_NAME}-test ${PROJECT_NAME}) -# endif() - -## Add folders to be run by python nosetests -# catkin_add_nosetests(test) diff --git a/common/speech/lasr_speech/README.md b/common/speech/lasr_speech/README.md deleted file mode 100644 index 6b14f0f6e..000000000 --- a/common/speech/lasr_speech/README.md +++ /dev/null @@ -1,75 +0,0 @@ -# lasr_speech - -The lasr_speech package - -This package is maintained by: -- [Jared](mailto:j.w.swift@outlook.com) - -## Prerequisites - -This package depends on the following ROS packages: -- catkin (buildtool) -- lasr_rasa (build) -- lasr_speech_recognition_msgs (build) -- lasr_speech_recognition_whisper (build) -- rospy (build) -- lasr_rasa (exec) -- lasr_speech_recognition_msgs (exec) -- lasr_speech_recognition_whisper (exec) -- rospy (exec) - -Ask the package maintainer to write or create a blank `doc/PREREQUISITES.md` for their package! - -## Usage - -Ask the package maintainer to write a `doc/USAGE.md` for their package! - -## Example - -Ask the package maintainer to write a `doc/EXAMPLE.md` for their package! - -## Technical Overview - -Ask the package maintainer to write a `doc/TECHNICAL.md` for their package! - -## ROS Definitions - -### Launch Files - -#### `speech` - -No description provided. - -| Argument | Default | Description | -|:-:|:-:|---| -| matcher | by-index | | -| device_param | | | -| rasa_model | | | - - - -### Messages - -This package has no messages. - -### Services - -#### `Speech` - -Request - -| Field | Type | Description | -|:-:|:-:|---| -| play_sound | bool | | - -Response - -| Field | Type | Description | -|:-:|:-:|---| -| json_response | string | | -| success | bool | | - - -### Actions - -This package has no actions. 
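The Speech service documented above can be exercised with a short client. A minimal sketch, assuming the lasr_speech node below is running and has registered /lasr_speech/transcribe_and_parse; the client node name is arbitrary:

#!/usr/bin/env python3
import rospy
from lasr_speech.srv import Speech

rospy.init_node("speech_client_example")
rospy.wait_for_service("/lasr_speech/transcribe_and_parse")
transcribe_and_parse = rospy.ServiceProxy("/lasr_speech/transcribe_and_parse", Speech)

# play_sound=True beeps before listening; the response carries Rasa's parse
response = transcribe_and_parse(play_sound=True)
if response.success:
    rospy.loginfo("Parsed intent JSON: %s", response.json_response)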
diff --git a/common/speech/lasr_speech/launch/speech.launch b/common/speech/lasr_speech/launch/speech.launch deleted file mode 100644 index b4a3dfae5..000000000 --- a/common/speech/lasr_speech/launch/speech.launch +++ /dev/null @@ -1,10 +0,0 @@ - - - - - - - - - - diff --git a/common/speech/lasr_speech/nodes/service b/common/speech/lasr_speech/nodes/service deleted file mode 100755 index 901058ba4..000000000 --- a/common/speech/lasr_speech/nodes/service +++ /dev/null @@ -1,47 +0,0 @@ -#!/usr/bin/env python3 -import rospy -from lasr_speech.srv import Speech, SpeechResponse -from lasr_rasa.srv import Rasa -from lasr_speech_recognition_msgs.srv import TranscribeAudio -import sounddevice -import soundfile -import pyaudio -from time import sleep -from multiprocessing import Process -import rospkg -import os - -class TranscribeAndParse: - - def __init__(self): - rospy.wait_for_service("/lasr_rasa/parse") - self.rasa = rospy.ServiceProxy("/lasr_rasa/parse", Rasa) - self.transcribe_audio = rospy.ServiceProxy("/whisper/transcribe_audio", TranscribeAudio) - self.sound_data = soundfile.read(os.path.join(rospkg.RosPack().get_path("lasr_speech"), "sounds", "beep.wav"))[0] - - def play_sound(self): - def play(): - audio_interface = pyaudio.PyAudio() - sample_rate = int(audio_interface.get_default_output_device_info()["defaultSampleRate"]) - sounddevice.play(self.sound_data, sample_rate, - device=audio_interface.get_default_output_device_info()["index"]) - sounddevice.wait() - audio_interface.terminate() - p = Process(target=play) - p.start() - sleep(0.5) - - def __call__(self, req): - if req.play_sound: - self.play_sound() - transcription = self.transcribe_audio() - rospy.loginfo(transcription) - rasa_response = self.rasa(transcription.phrase) - rospy.loginfo(rasa_response) - return SpeechResponse(rasa_response.json_response, rasa_response.success) - -if __name__ == "__main__": - rospy.init_node("lasr_speech") - worker = TranscribeAndParse() - rospy.Service("/lasr_speech/transcribe_and_parse", Speech, worker) - rospy.spin() diff --git a/common/speech/lasr_speech/package.xml b/common/speech/lasr_speech/package.xml deleted file mode 100644 index 7af6f40c3..000000000 --- a/common/speech/lasr_speech/package.xml +++ /dev/null @@ -1,71 +0,0 @@ - - - lasr_speech - 0.0.0 - The lasr_speech package - - - - - Jared - - - - - - TODO - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - catkin - lasr_rasa - lasr_speech_recognition_msgs - lasr_speech_recognition_whisper - rospy - lasr_rasa - lasr_speech_recognition_msgs - lasr_speech_recognition_whisper - rospy - lasr_rasa - lasr_speech_recognition_msgs - lasr_speech_recognition_whisper - rospy - - - - - - - - diff --git a/common/speech/lasr_speech/sounds/beep.wav b/common/speech/lasr_speech/sounds/beep.wav deleted file mode 100644 index f224049f9..000000000 Binary files a/common/speech/lasr_speech/sounds/beep.wav and /dev/null differ diff --git a/common/speech/lasr_speech/srv/Speech.srv b/common/speech/lasr_speech/srv/Speech.srv deleted file mode 100644 index bd5f7760b..000000000 --- a/common/speech/lasr_speech/srv/Speech.srv +++ /dev/null @@ -1,4 +0,0 @@ -bool play_sound ---- -string json_response -bool success \ No newline at end of file diff --git a/common/speech/lasr_voice/package.xml b/common/speech/lasr_voice/package.xml index 499c4d263..51e5d8fd5 100644 --- a/common/speech/lasr_voice/package.xml +++ b/common/speech/lasr_voice/package.xml @@ -7,7 +7,7 @@ - elisabeth + Jared Swift diff --git a/common/speech/lasr_voice/src/lasr_voice/voice.py 
b/common/speech/lasr_voice/src/lasr_voice/voice.py index fd6307607..0bc657c9f 100755 --- a/common/speech/lasr_voice/src/lasr_voice/voice.py +++ b/common/speech/lasr_voice/src/lasr_voice/voice.py @@ -4,18 +4,17 @@ import actionlib from actionlib_msgs.msg import GoalStatus -# TODO: input is real life or simulation, as it will voice will not play otherwise class Voice: def __init__(self): - self._tts_client = actionlib.SimpleActionClient('/tts', TtsAction) - self.can_tts = self._tts_client.wait_for_server(rospy.Duration(10.0)) + self._tts_client = actionlib.SimpleActionClient("/tts", TtsAction) + self._can_tts = self._tts_client.wait_for_server(rospy.Duration(10.0)) def __tts(self, text): - if self.can_tts: + if self._can_tts: goal = TtsGoal() goal.rawtext.text = text - goal.rawtext.lang_id = 'en_GB' + goal.rawtext.lang_id = "en_GB" self._tts_client.send_goal(goal) rospy.loginfo(f"\033[32mTIAGO: {text}\033[0m") @@ -30,19 +29,7 @@ def get_tts_status(self): return self._tts_client.get_state() def is_running(self): - return self._tts_client.get_state() == GoalStatus.PENDING or \ - self._tts_client.get_state() == GoalStatus.ACTIVE - def speak(self, text): - if rospy.get_param('/is_simulation'): - rospy.loginfo(text) - else: - rospy.loginfo(text) - self.sync_tts(text) - -if __name__ == '__main__': - rospy.init_node("voice_node", anonymous=True) - voice = Voice() - voice.sync_tts("hello world nr 1") - voice.async_tts("hello world nr 2") - while voice.is_running(): - print("still talking") + return ( + self._tts_client.get_state() == GoalStatus.PENDING + or self._tts_client.get_state() == GoalStatus.ACTIVE + ) diff --git a/common/third_party/aruco_ros b/common/third_party/aruco_ros deleted file mode 160000 index e8f93a0a8..000000000 --- a/common/third_party/aruco_ros +++ /dev/null @@ -1 +0,0 @@ -Subproject commit e8f93a0a8ffce542ec1952c573e7f1a063ed2a87 diff --git a/common/vision/lasr_vision_msgs/CMakeLists.txt b/common/vision/lasr_vision_msgs/CMakeLists.txt index f79567dc5..2ac480afb 100644 --- a/common/vision/lasr_vision_msgs/CMakeLists.txt +++ b/common/vision/lasr_vision_msgs/CMakeLists.txt @@ -46,6 +46,7 @@ find_package(catkin REQUIRED COMPONENTS message_generation message_runtime senso add_message_files( FILES Detection.msg + Detection3D.msg BodyPixPose.msg BodyPixMask.msg BodyPixMaskRequest.msg @@ -57,6 +58,7 @@ add_message_files( add_service_files( FILES YoloDetection.srv + YoloDetection3D.srv BodyPixDetection.srv TorchFaceFeatureDetection.srv Recognise.srv diff --git a/common/vision/lasr_vision_msgs/msg/Detection3D.msg b/common/vision/lasr_vision_msgs/msg/Detection3D.msg new file mode 100644 index 000000000..de41b3451 --- /dev/null +++ b/common/vision/lasr_vision_msgs/msg/Detection3D.msg @@ -0,0 +1,18 @@ +# Detected Object Class +string name + +# How certain we are this is what we think it is +float32 confidence + +# Bounding box mask defined in pixel-space +int32[] xywh + +# Target frame +string target_frame + +# Segmentation mask defined in pixel-space +# +# This will be empty if a segmentation model was not used +int32[] xyseg + +geometry_msgs/Point point \ No newline at end of file diff --git a/common/vision/lasr_vision_msgs/srv/YoloDetection3D.srv b/common/vision/lasr_vision_msgs/srv/YoloDetection3D.srv new file mode 100644 index 000000000..13e146fab --- /dev/null +++ b/common/vision/lasr_vision_msgs/srv/YoloDetection3D.srv @@ -0,0 +1,16 @@ +# PointCloud2 to run inference on +sensor_msgs/PointCloud2 pcl + +# YOLO model to use +string dataset + +# How certain the detection should be to 
include +float32 confidence + +# Non-maximum Suppression +# +# Guiding value to remove overlapping bounding boxes +float32 nms + +# Frame to express the detected points in +string target_frame +--- +# Detection result +lasr_vision_msgs/Detection3D[] detected_objects \ No newline at end of file diff --git a/common/vision/lasr_vision_yolov8/CMakeLists.txt b/common/vision/lasr_vision_yolov8/CMakeLists.txt index 0ab61134d..a65d456c0 100644 --- a/common/vision/lasr_vision_yolov8/CMakeLists.txt +++ b/common/vision/lasr_vision_yolov8/CMakeLists.txt @@ -162,6 +162,7 @@ include_directories( catkin_install_python(PROGRAMS nodes/service examples/relay + examples/relay_3d examples/construct_mask DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} ) diff --git a/common/vision/lasr_vision_yolov8/examples/relay_3d b/common/vision/lasr_vision_yolov8/examples/relay_3d new file mode 100644 index 000000000..1ffc9f758 --- /dev/null +++ b/common/vision/lasr_vision_yolov8/examples/relay_3d @@ -0,0 +1,57 @@ +#!/usr/bin/env python3 + +import sys +import rospy +import threading + +from sensor_msgs.msg import PointCloud2 +from lasr_vision_msgs.srv import YoloDetection3D, YoloDetection3DRequest + +# figure out what model we are using +if len(sys.argv) >= 2: + model = sys.argv[1] +else: + model = "yolov8n-seg.pt" + +processing = False + + +def detect(pcl): + global processing + processing = True + rospy.loginfo("Received pcl message") + + try: + detect_service = rospy.ServiceProxy("/yolov8/detect3d", YoloDetection3D) + req = YoloDetection3DRequest() + req.pcl = pcl + req.dataset = model + req.confidence = 0.25 + req.nms = 0.4 + req.target_frame = "map"  # frame the detected points are transformed into + resp = detect_service(req) + print(resp) + except rospy.ServiceException as e: + rospy.logerr("Service call failed: %s" % e) + finally: + processing = False + + +def pcl_callback(pcl): + global processing + if processing: + return + + t = threading.Thread(target=detect, args=(pcl,)) + t.start() + + +def listener(): + rospy.init_node("pcl_listener", anonymous=True) + rospy.wait_for_service("/yolov8/detect3d") + rospy.Subscriber("/xtion/depth_registered/points", PointCloud2, pcl_callback) + rospy.spin() + + +if __name__ == "__main__": + listener() \ No newline at end of file diff --git a/common/vision/lasr_vision_yolov8/nodes/service b/common/vision/lasr_vision_yolov8/nodes/service index 301f1b9c3..fd1b0791f 100644 --- a/common/vision/lasr_vision_yolov8/nodes/service +++ b/common/vision/lasr_vision_yolov8/nodes/service @@ -6,21 +6,31 @@ import rospkg import lasr_vision_yolov8 as yolo from sensor_msgs.msg import Image -from lasr_vision_msgs.srv import YoloDetection, YoloDetectionRequest, YoloDetectionResponse +from visualization_msgs.msg import Marker + +from lasr_vision_msgs.srv import ( + YoloDetection, + YoloDetectionRequest, + YoloDetectionResponse, + YoloDetection3D, + YoloDetection3DRequest, + YoloDetection3DResponse, +) # Put ourselves in the model folder import os import rospkg + rp = rospkg.RosPack() -package_path = rp.get_path('lasr_vision_yolov8') -os.chdir(os.path.abspath(os.path.join(package_path, 'models'))) +package_path = rp.get_path("lasr_vision_yolov8") +os.chdir(os.path.abspath(os.path.join(package_path, "models"))) # Initialise rospy -rospy.init_node('yolov8_service') +rospy.init_node("yolov8_service") # Determine variables -DEBUG = rospy.get_param('~debug', False) -PRELOAD = rospy.get_param('~preload', []) +DEBUG = rospy.get_param("~debug", False) +PRELOAD = rospy.get_param("~preload", []) for model in PRELOAD: yolo.load_model(model) @@ -28,21 +38,48 @@ for model in PRELOAD: # Prepare publisher debug_publishers = {} if DEBUG: - debug_publisher =
rospy.Publisher('/yolov8/debug', Image, queue_size=1) + debug_publisher = rospy.Publisher("/yolov8/debug", Image, queue_size=1) + def detect(request: YoloDetectionRequest) -> YoloDetectionResponse: - ''' + """ Hand off detection request to yolo library - ''' + """ debug_publisher = None if DEBUG: if request.dataset in debug_publishers: debug_publisher = debug_publishers[request.dataset] else: - topic_name = re.sub(r'[\W_]+', '', request.dataset) - debug_publisher = rospy.Publisher(f'/yolov8/debug/{topic_name}', Image, queue_size=1) + topic_name = re.sub(r"[\W_]+", "", request.dataset) + debug_publisher = rospy.Publisher( + f"/yolov8/debug/{topic_name}", Image, queue_size=1 + ) return yolo.detect(request, debug_publisher) -rospy.Service('/yolov8/detect', YoloDetection, detect) -rospy.loginfo('YOLOv8 service started') + +def detect_3d(request: YoloDetection3DRequest) -> YoloDetection3DResponse: + """ + Hand off detection request to yolo library + """ + debug_inference_publisher, debug_point_publisher = None, None + if DEBUG: + if request.dataset in debug_publishers: + debug_inference_publisher, debug_point_publisher = debug_publishers[ + request.dataset + ] + else: + topic_name = re.sub(r"[\W_]+", "", request.dataset) + debug_inference_publisher = rospy.Publisher( + f"/yolov8/debug/{topic_name}", Image, queue_size=1 + ) + debug_point_publisher = rospy.Publisher( + f"/yolov8/debug/points", Marker, queue_size=10 + ) + + return yolo.detect_3d(request, debug_inference_publisher, debug_point_publisher) + + +rospy.Service("/yolov8/detect", YoloDetection, detect) +rospy.Service("/yolov8/detect3d", YoloDetection3D, detect_3d) +rospy.loginfo("YOLOv8 service started") rospy.spin() diff --git a/common/vision/lasr_vision_yolov8/src/lasr_vision_yolov8/__init__.py b/common/vision/lasr_vision_yolov8/src/lasr_vision_yolov8/__init__.py index 7cc994930..1a1193a4f 100644 --- a/common/vision/lasr_vision_yolov8/src/lasr_vision_yolov8/__init__.py +++ b/common/vision/lasr_vision_yolov8/src/lasr_vision_yolov8/__init__.py @@ -1 +1 @@ -from .yolo import detect, load_model +from .yolo import detect, detect_3d, load_model diff --git a/common/vision/lasr_vision_yolov8/src/lasr_vision_yolov8/yolo.py b/common/vision/lasr_vision_yolov8/src/lasr_vision_yolov8/yolo.py index d197c28ab..bdb5ca586 100644 --- a/common/vision/lasr_vision_yolov8/src/lasr_vision_yolov8/yolo.py +++ b/common/vision/lasr_vision_yolov8/src/lasr_vision_yolov8/yolo.py @@ -1,51 +1,92 @@ import rospy import cv2_img +import cv2_pcl import numpy as np from PIL import Image from ultralytics import YOLO from sensor_msgs.msg import Image as SensorImage -from lasr_vision_msgs.msg import Detection -from lasr_vision_msgs.srv import YoloDetectionRequest, YoloDetectionResponse +from lasr_vision_msgs.msg import Detection, Detection3D +from lasr_vision_msgs.srv import ( + YoloDetectionRequest, + YoloDetectionResponse, + YoloDetection3DRequest, + YoloDetection3DResponse, +) +import cv2 +import ros_numpy as rnp + +from geometry_msgs.msg import Point, PointStamped + +import tf2_ros as tf +import tf2_geometry_msgs + +from visualization_msgs.msg import Marker + + +def create_point_marker(x, y, z, idx, g=1.0): + marker_msg = Marker() + marker_msg.header.frame_id = "map" + marker_msg.header.stamp = rospy.Time.now() + marker_msg.id = idx + marker_msg.type = Marker.SPHERE + marker_msg.action = Marker.ADD + marker_msg.pose.position.x = x + marker_msg.pose.position.y = y + marker_msg.pose.position.z = z + marker_msg.pose.orientation.w = 1.0 + marker_msg.scale.x = 0.1 + 
marker_msg.scale.y = 0.1 + marker_msg.scale.z = 0.1 + marker_msg.color.a = 1.0 + marker_msg.color.r = 0.0 + marker_msg.color.g = g + marker_msg.color.b = 0.0 + return marker_msg + # model cache loaded_models = {} + def load_model(dataset: str) -> None: - ''' + """ Load a model into cache - ''' + """ model = None if dataset in loaded_models: model = loaded_models[dataset] else: model = YOLO(dataset) - rospy.loginfo(f'Loaded {dataset} model') + rospy.loginfo(f"Loaded {dataset} model") loaded_models[dataset] = model - + return model -def detect(request: YoloDetectionRequest, debug_publisher: rospy.Publisher | None) -> YoloDetectionResponse: - ''' + +def detect( + request: YoloDetectionRequest, debug_publisher: rospy.Publisher +) -> YoloDetectionResponse: + """ Run YOLO inference on given detection request - ''' + """ # decode the image - rospy.loginfo('Decoding') + rospy.loginfo("Decoding") img = cv2_img.msg_to_pillow_img(request.image_raw) - + # load model - rospy.loginfo('Loading model') + rospy.loginfo("Loading model") model = load_model(request.dataset) # run inference - rospy.loginfo('Running inference') + rospy.loginfo("Running inference") results = model(img, conf=request.confidence, iou=request.nms) result = results[0] - rospy.loginfo('Inference complete') + rospy.loginfo("Inference complete") # construct response detected_objects = [] @@ -62,11 +103,85 @@ def detect(request: YoloDetectionRequest, debug_publisher: rospy.Publisher | Non detection.xyseg = result.masks.xy[i].flatten().astype(int).tolist() detected_objects.append(detection) - + # publish to debug topic if debug_publisher is not None: debug_publisher.publish(cv2_img.cv2_img_to_msg(result.plot())) - + response = YoloDetectionResponse() response.detected_objects = detected_objects return response + + +def detect_3d( + request: YoloDetection3DRequest, + debug_inference_publisher: rospy.Publisher, + debug_point_publisher: rospy.Publisher, +) -> YoloDetection3DResponse: + """ + Run YOLO 3D inference on given detection request + """ + + rospy.loginfo("Waiting for transformation to become available") + tf_buffer = tf.Buffer() + tf_listener = tf.TransformListener(tf_buffer)  # the buffer only fills while a listener is attached + # Wait for the transform to the requested target frame to become available + while not tf_buffer.can_transform( + request.target_frame, request.pcl.header.frame_id, rospy.Time(0) + ): + rospy.sleep(0.01) + + # Extract rgb image from pointcloud + rospy.loginfo("Decoding") + img = cv2_pcl.pcl_to_cv2(request.pcl) + + # load model + rospy.loginfo("Loading model") + model = load_model(request.dataset) + + # run inference + rospy.loginfo("Running inference") + results = model(img, conf=request.confidence, iou=request.nms) + result = results[0] + rospy.loginfo("Inference complete") + + # construct response + detected_objects = [] + object_count = result.boxes.cls.size(dim=0) + has_segment_masks = result.masks is not None + for i in range(0, object_count): + detection = Detection3D() + detection.name = result.names[int(result.boxes.cls[i])] + detection.confidence = float(result.boxes.conf.cpu().numpy()[i]) + detection.xywh = result.boxes.xywh[i].cpu().numpy().astype(int).tolist() + + # copy segmented mask if available + if has_segment_masks: + detection.xyseg = result.masks.xy[i].flatten().astype(int).tolist() + + # xyz_points = [pcl_xyz[x][y] for x, y in indices] + centroid = cv2_pcl.seg_to_centroid(request.pcl, np.array(detection.xyseg)) + + point = Point(*centroid) + point_stamped = PointStamped() + point_stamped.point = point + point_stamped.header.frame_id = request.pcl.header.frame_id + detection.point = tf_buffer.transform( + point_stamped,
request.target_frame + ).point + + if debug_point_publisher is not None: + marker = create_point_marker( + detection.point.x, detection.point.y, detection.point.z, i + ) + debug_point_publisher.publish(marker) + + detected_objects.append(detection) + + # publish to debug topic + if debug_inference_publisher is not None: + debug_inference_publisher.publish(cv2_img.cv2_img_to_msg(result.plot())) + + response = YoloDetection3DResponse() + response.detected_objects = detected_objects + return response diff --git a/legacy/face_detection/__init__.py b/legacy/aruco_service/CATKIN_IGNORE similarity index 100% rename from legacy/face_detection/__init__.py rename to legacy/aruco_service/CATKIN_IGNORE diff --git a/legacy/face_detection/src/__init__.py b/legacy/choosing_wait_position/CATKIN_IGNORE similarity index 100% rename from legacy/face_detection/src/__init__.py rename to legacy/choosing_wait_position/CATKIN_IGNORE diff --git a/legacy/common_math/CMakeLists.txt b/legacy/common_math/CMakeLists.txt deleted file mode 100644 index 934b0ad8d..000000000 --- a/legacy/common_math/CMakeLists.txt +++ /dev/null @@ -1,212 +0,0 @@ -cmake_minimum_required(VERSION 3.0.2) -project(common_math) - -## Compile as C++11, supported in ROS Kinetic and newer -# add_compile_options(-std=c++11) - -## Find catkin macros and libraries -## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz) -## is used, also find other catkin packages -find_package(catkin REQUIRED - actionlib - actionlib_msgs - geometry_msgs - message_generation - rospy - std_msgs - std_srvs - sensor_msgs -) - -## System dependencies are found with CMake's conventions -# find_package(Boost REQUIRED COMPONENTS system) - - -## Uncomment this if the package has a setup.py. This macro ensures -## modules and global scripts declared therein get installed -## See http://ros.org/doc/api/catkin/html/user_guide/setup_dot_py.html - catkin_python_setup() - -################################################ -## Declare ROS messages, services and actions ## -################################################ - -## To declare and build messages, services or actions from within this -## package, follow these steps: -## * Let MSG_DEP_SET be the set of packages whose message types you use in -## your messages/services/actions (e.g. std_msgs, actionlib_msgs, ...). -## * In the file package.xml: -## * add a build_depend tag for "message_generation" -## * add a build_depend and a exec_depend tag for each package in MSG_DEP_SET -## * If MSG_DEP_SET isn't empty the following dependency has been pulled in -## but can be declared for certainty nonetheless: -## * add a exec_depend tag for "message_runtime" -## * In this file (CMakeLists.txt): -## * add "message_generation" and every package in MSG_DEP_SET to -## find_package(catkin REQUIRED COMPONENTS ...) -## * add "message_runtime" and every package in MSG_DEP_SET to -## catkin_package(CATKIN_DEPENDS ...) -## * uncomment the add_*_files sections below as needed -## and list every .msg/.srv/.action file to be processed -## * uncomment the generate_messages entry below -## * add every package in MSG_DEP_SET to generate_messages(DEPENDENCIES ...) 
- -## Generate messages in the 'msg' folder -# add_message_files( -# FILES -# Message1.msg -# Message2.msg -# ) - -## Generate services in the 'srv' folder -add_service_files( - FILES - TfTransform.srv -) - -## Generate actions in the 'action' folder -# add_action_files( -# FILES -# Action1.action -# Action2.action -# ) - -## Generate added messages and services with any dependencies listed here -generate_messages(DEPENDENCIES - actionlib_msgs - geometry_msgs - sensor_msgs - std_msgs -) - -################################################ -## Declare ROS dynamic reconfigure parameters ## -################################################ - -## To declare and build dynamic reconfigure parameters within this -## package, follow these steps: -## * In the file package.xml: -## * add a build_depend and a exec_depend tag for "dynamic_reconfigure" -## * In this file (CMakeLists.txt): -## * add "dynamic_reconfigure" to -## find_package(catkin REQUIRED COMPONENTS ...) -## * uncomment the "generate_dynamic_reconfigure_options" section below -## and list every .cfg file to be processed - -## Generate dynamic reconfigure parameters in the 'cfg' folder -# generate_dynamic_reconfigure_options( -# cfg/DynReconf1.cfg -# cfg/DynReconf2.cfg -# ) - -################################### -## catkin specific configuration ## -################################### -## The catkin_package macro generates cmake config files for your package -## Declare things to be passed to dependent projects -## INCLUDE_DIRS: uncomment this if your package contains header files -## LIBRARIES: libraries you create in this project that dependent projects also need -## CATKIN_DEPENDS: catkin_packages dependent projects also need -## DEPENDS: system dependencies of this project that dependent projects also need -catkin_package( -# INCLUDE_DIRS include -# LIBRARIES common_math -# CATKIN_DEPENDS other_catkin_pkg -# DEPENDS system_lib -) - -########### -## Build ## -########### - -## Specify additional locations of header files -## Your package locations should be listed before other locations -include_directories( -# include -# ${catkin_INCLUDE_DIRS} -) - -## Declare a C++ library -# add_library(${PROJECT_NAME} -# src/${PROJECT_NAME}/common_math.cpp -# ) - -## Add cmake target dependencies of the library -## as an example, code may need to be generated before libraries -## either from message generation or dynamic reconfigure -# add_dependencies(${PROJECT_NAME} ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS}) - -## Declare a C++ executable -## With catkin_make all packages are built within a single CMake context -## The recommended prefix ensures that target names across packages don't collide -# add_executable(${PROJECT_NAME}_node src/common_math_node.cpp) - -## Rename C++ executable without prefix -## The above recommended prefix causes long target names, the following renames the -## target back to the shorter version for ease of user use -## e.g. 
"rosrun someones_pkg node" instead of "rosrun someones_pkg someones_pkg_node" -# set_target_properties(${PROJECT_NAME}_node PROPERTIES OUTPUT_NAME node PREFIX "") - -## Add cmake target dependencies of the executable -## same as for the library above -# add_dependencies(${PROJECT_NAME}_node ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS}) - -## Specify libraries to link a library or executable target against -# target_link_libraries(${PROJECT_NAME}_node -# ${catkin_LIBRARIES} -# ) - -############# -## Install ## -############# - -# all install targets should use catkin DESTINATION variables -# See http://ros.org/doc/api/catkin/html/adv_user_guide/variables.html - -## Mark executable scripts (Python etc.) for installation -## in contrast to setup.py, you can choose the destination -# catkin_install_python(PROGRAMS -# scripts/my_python_script -# DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} -# ) - -## Mark executables for installation -## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_executables.html -# install(TARGETS ${PROJECT_NAME}_node -# RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} -# ) - -## Mark libraries for installation -## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_libraries.html -# install(TARGETS ${PROJECT_NAME} -# ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} -# LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} -# RUNTIME DESTINATION ${CATKIN_GLOBAL_BIN_DESTINATION} -# ) - -## Mark cpp header files for installation -# install(DIRECTORY include/${PROJECT_NAME}/ -# DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION} -# FILES_MATCHING PATTERN "*.h" -# PATTERN ".svn" EXCLUDE -# ) - -## Mark other files for installation (e.g. launch and bag files, etc.) -# install(FILES -# # myfile1 -# # myfile2 -# DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION} -# ) - -############# -## Testing ## -############# - -## Add gtest based cpp test target and link libraries -# catkin_add_gtest(${PROJECT_NAME}-test test/test_common_math.cpp) -# if(TARGET ${PROJECT_NAME}-test) -# target_link_libraries(${PROJECT_NAME}-test ${PROJECT_NAME}) -# endif() - -## Add folders to be run by python nosetests -# catkin_add_nosetests(test) diff --git a/legacy/common_math/README.md b/legacy/common_math/README.md deleted file mode 100644 index 5d7e2598f..000000000 --- a/legacy/common_math/README.md +++ /dev/null @@ -1,75 +0,0 @@ -# common_math - -The common_math package - -This package is maintained by: -- [jared](mailto:jared@todo.todo) - -## Prerequisites - -This package depends on the following ROS packages: -- catkin (buildtool) -- actionlib (build) -- actionlib_msgs (build) -- geometry_msgs (build) -- message_generation (build) -- rospy (build) -- std_msgs (build) -- std_srvs (build) -- actionlib (exec) -- actionlib_msgs (exec) -- geometry_msgs (exec) -- rospy (exec) -- std_msgs (exec) -- std_srvs (exec) -- face_detection (exec) - -Ask the package maintainer to write or create a blank `doc/PREREQUISITES.md` for their package! - -## Usage - -Ask the package maintainer to write a `doc/USAGE.md` for their package! - -## Example - -Ask the package maintainer to write a `doc/EXAMPLE.md` for their package! - -## Technical Overview - -Ask the package maintainer to write a `doc/TECHNICAL.md` for their package! - -## ROS Definitions - -### Launch Files - -This package has no launch files. - -### Messages - -This package has no messages. 
- -### Services - -#### `TfTransform` - -Request - -| Field | Type | Description | -|:-:|:-:|---| -| pose_array | geometry_msgs/PoseArray | | -| pointcloud | sensor_msgs/PointCloud2 | | -| point | geometry_msgs/PointStamped | | -| target_frame | std_msgs/String | | - -Response - -| Field | Type | Description | -|:-:|:-:|---| -| target_pose_array | geometry_msgs/PoseArray | | -| target_pointcloud | sensor_msgs/PointCloud2 | | -| target_point | geometry_msgs/PointStamped | | - - -### Actions - -This package has no actions. diff --git a/legacy/common_math/package.xml b/legacy/common_math/package.xml deleted file mode 100644 index c0c4b2acb..000000000 --- a/legacy/common_math/package.xml +++ /dev/null @@ -1,78 +0,0 @@ - - - common_math - 0.0.0 - The common_math package - - - - - jared - - - - - - TODO - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - catkin - actionlib - actionlib_msgs - geometry_msgs - message_generation - rospy - std_msgs - std_srvs - actionlib - actionlib_msgs - geometry_msgs - rospy - std_msgs - std_srvs - actionlib - actionlib_msgs - geometry_msgs - rospy - std_msgs - std_srvs - face_detection - - - - - - - diff --git a/legacy/common_math/setup.py b/legacy/common_math/setup.py deleted file mode 100644 index ab3e09d99..000000000 --- a/legacy/common_math/setup.py +++ /dev/null @@ -1,12 +0,0 @@ - -#!/usr/bin/env python3 - -from distutils.core import setup -from catkin_pkg.python_setup import generate_distutils_setup - -setup_args = generate_distutils_setup( - packages=['common_math'], - package_dir={'': 'src'} -) - -setup(**setup_args) \ No newline at end of file diff --git a/legacy/common_math/src/common_math/__init__.py b/legacy/common_math/src/common_math/__init__.py deleted file mode 100644 index d06a271a9..000000000 --- a/legacy/common_math/src/common_math/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .math_ import * diff --git a/legacy/common_math/src/common_math/helpers/common_math_helpers.py b/legacy/common_math/src/common_math/helpers/common_math_helpers.py deleted file mode 100644 index 867ce5257..000000000 --- a/legacy/common_math/src/common_math/helpers/common_math_helpers.py +++ /dev/null @@ -1,28 +0,0 @@ -#!/usr/bin/env python3 - -import rospy -from geometry_msgs.msg import PoseWithCovarianceStamped -from tiago_controllers.helpers.pose_helpers import get_pose_from_param -from common_math.math_ import euclidian_distance - - - -def get_dist_to_door(is_robot, x=None, y=None): - if is_robot: - robot_pose = rospy.wait_for_message("/robot_pose", PoseWithCovarianceStamped) - print(f"robot pose: {robot_pose}") - r = (robot_pose.pose.pose.position.x, robot_pose.pose.pose.position.y) - else: - r = (x, y) - - door_position = get_pose_from_param("/door/pose") - print(f"door pose: {door_position}") - d = (door_position.position.x, door_position.position.y) - - dist = euclidian_distance(r, d) - print(f"distance to door: {dist}") - return dist - -# def get_how_close_to_door(is_robot, min_dist=0.5): -# dist = self.get_dist_to_door(is_robot) -# return round(dist, 1) < min_dist diff --git a/legacy/common_math/src/common_math/math.py b/legacy/common_math/src/common_math/math.py deleted file mode 100755 index 433458881..000000000 --- a/legacy/common_math/src/common_math/math.py +++ /dev/null @@ -1,121 +0,0 @@ -#!/usr/bin/python -import numpy as np -import rospy -import numpy as np -# import ros_numpy as rnp -from geometry_msgs.msg import PointStamped, Point -from std_msgs.msg import Header -from common_math.srv import TfTransform, TfTransformRequest - -def 
pcl_msg_to_cv2(pcl_msg): - """ - Constructs a cv2 image from a PointCloud2 message. - - Parameters - ---------- - pcl_msg : sensor_msgs/PointCloud2 - Input pointcloud (organised) - - Returns - ------- - np.array : cv2 image - """ - - # Extract rgb image from pointcloud - frame = np.fromstring(pcl_msg.data, dtype=np.uint8) - frame = frame.reshape(pcl_msg.height, pcl_msg.width, 32) - frame = frame[:,:,16:19] - - # Ensure array is contiguous - frame = np.ascontiguousarray(frame, dtype=np.uint8) - - return frame - - -def tf_transform(target_frame, pose_array=None, pointcloud=None, point=None): - """ - Transforms the given message to the target frame. - Parameters: - target_frame {frame_id} -- frame to transform to - pose_array {PoseArray} -- array of poses - pointcloud {PointCloud2} - point {PointStamped} - - Returns: - response {TfTransformResponse} -- target_pose_array {PoseArray}, target_pointcloud {PointCloud2}, target_point {PointStamped} - """ - - assert pose_array is not None or pointcloud is not None or point is not None - - rospy.wait_for_service('tf_transform', timeout=10) - try: - tf_transform_srv = rospy.ServiceProxy('tf_transform', TfTransform) - request = TfTransformRequest() - if pose_array is not None: - request.pose_array = pose_array - if pointcloud is not None: - request.pointcloud = pointcloud - if point is not None: - request.point = point - request.target_frame.data = target_frame - response = tf_transform_srv(request) - # print('transform done!') - return response - except rospy.ServiceException as e: - print("Service call failed: %s" % e) -def bb_to_centroid(pcl_msg, x, y, w, h): - """ - Computes a centroid in the map plane from a given bounding box. - Performs this by extracting the bounding box from the pointcloud, - and computing the centroid from that. - - Parameters - ---------- - pcl_msg : sensor_msgs/PointCloud2 - Input pointcloud (organised). - x : float - Bottom left x of bounding box. - y : float - Bottom left y of bounding box. - w : float - Width of bounding box. - h : float - Height of bounding box. - - Returns - ------- - geometry_msgs/PointStamped : centroid - """ - - # Convert xywh to bounding box coordinates. - x1, y1, x2, y2 = x, y, x + w, y + h - - # cv2 image - frame = pcl_msg_to_cv2(pcl_msg) - - # Compute mask from bounding box - mask = np.zeros(shape=frame.shape[:2]) - mask[y1:y2, x1:x2] = 255 # bounding box dimensions - - # Extract mask indices from bounding box - indices = np.argwhere(mask) - if len(indices) < 1: - return None - pcl_xyz = rnp.point_cloud2.pointcloud2_to_xyz_array(pcl_msg, remove_nans=False) - - # Create x, y, z array, for indexing. - xyz_points = [] - for x, y in indices: - x, y, z = pcl_xyz[x][y] - xyz_points.append([x, y, z]) - - # Get mean (centroid) point. - x, y, z = np.nanmean(xyz_points, axis=0) - - # Construct PointStamped message. 
- centroid_ = PointStamped(point=Point(x, y, z), header=Header(1, rospy.Time(0), "xtion_depth_optical_frame")) - - # Transform to map frame - centroid_ = tf_transform('map', point=centroid_).target_point - - return centroid_ \ No newline at end of file diff --git a/legacy/common_math/src/common_math/transformations.py b/legacy/common_math/src/common_math/transformations.py deleted file mode 100644 index 8872cbe84..000000000 --- a/legacy/common_math/src/common_math/transformations.py +++ /dev/null @@ -1,1705 +0,0 @@ -# -*- coding: utf-8 -*- -# transformations.py - -# Copyright (c) 2006, Christoph Gohlke -# Copyright (c) 2006-2009, The Regents of the University of California -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution. -# * Neither the name of the copyright holders nor the names of any -# contributors may be used to endorse or promote products derived -# from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE -# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -# POSSIBILITY OF SUCH DAMAGE. - -"""Homogeneous Transformation Matrices and Quaternions. - -A library for calculating 4x4 matrices for translating, rotating, reflecting, -scaling, shearing, projecting, orthogonalizing, and superimposing arrays of -3D homogeneous coordinates as well as for converting between rotation matrices, -Euler angles, and quaternions. Also includes an Arcball control object and -functions to decompose transformation matrices. - -:Authors: - `Christoph Gohlke `__, - Laboratory for Fluorescence Dynamics, University of California, Irvine - -:Version: 20090418 - -Requirements ------------- - -* `Python 2.6 `__ -* `Numpy 1.3 `__ -* `transformations.c 20090418 `__ - (optional implementation of some functions in C) - -Notes ------ - -Matrices (M) can be inverted using numpy.linalg.inv(M), concatenated using -numpy.dot(M0, M1), or used to transform homogeneous coordinates (v) using -numpy.dot(M, v) for shape (4, \*) "point of arrays", respectively -numpy.dot(v, M.T) for shape (\*, 4) "array of points". - -Calculations are carried out with numpy.float64 precision. - -This Python implementation is not optimized for speed. - -Vector, point, quaternion, and matrix function arguments are expected to be -"array like", i.e. tuple, list, or numpy arrays. - -Return types are numpy arrays unless specified otherwise. 
- -Angles are in radians unless specified otherwise. - -Quaternions ix+jy+kz+w are represented as [x, y, z, w]. - -Use the transpose of transformation matrices for OpenGL glMultMatrixd(). - -A triple of Euler angles can be applied/interpreted in 24 ways, which can -be specified using a 4 character string or encoded 4-tuple: - - *Axes 4-string*: e.g. 'sxyz' or 'ryxy' - - - first character : rotations are applied to 's'tatic or 'r'otating frame - - remaining characters : successive rotation axis 'x', 'y', or 'z' - - *Axes 4-tuple*: e.g. (0, 0, 0, 0) or (1, 1, 1, 1) - - - inner axis: code of axis ('x':0, 'y':1, 'z':2) of rightmost matrix. - - parity : even (0) if inner axis 'x' is followed by 'y', 'y' is followed - by 'z', or 'z' is followed by 'x'. Otherwise odd (1). - - repetition : first and last axis are same (1) or different (0). - - frame : rotations are applied to static (0) or rotating (1) frame. - -References ----------- - -(1) Matrices and transformations. Ronald Goldman. - In "Graphics Gems I", pp 472-475. Morgan Kaufmann, 1990. -(2) More matrices and transformations: shear and pseudo-perspective. - Ronald Goldman. In "Graphics Gems II", pp 320-323. Morgan Kaufmann, 1991. -(3) Decomposing a matrix into simple transformations. Spencer Thomas. - In "Graphics Gems II", pp 320-323. Morgan Kaufmann, 1991. -(4) Recovering the data from the transformation matrix. Ronald Goldman. - In "Graphics Gems II", pp 324-331. Morgan Kaufmann, 1991. -(5) Euler angle conversion. Ken Shoemake. - In "Graphics Gems IV", pp 222-229. Morgan Kaufmann, 1994. -(6) Arcball rotation control. Ken Shoemake. - In "Graphics Gems IV", pp 175-192. Morgan Kaufmann, 1994. -(7) Representing attitude: Euler angles, unit quaternions, and rotation - vectors. James Diebel. 2006. -(8) A discussion of the solution for the best rotation to relate two sets - of vectors. W Kabsch. Acta Cryst. 1978. A34, 827-828. -(9) Closed-form solution of absolute orientation using unit quaternions. - BKP Horn. J Opt Soc Am A. 1987. 4(4), 629-642. -(10) Quaternions. Ken Shoemake. - http://www.sfu.ca/~jwa3/cmpt461/files/quatut.pdf -(11) From quaternion to matrix and back. JMP van Waveren. 2005. - http://www.intel.com/cd/ids/developer/asmo-na/eng/293748.htm -(12) Uniform random rotations. Ken Shoemake. - In "Graphics Gems III", pp 124-132. Morgan Kaufmann, 1992. 
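The conventions above are the part of this (deleted) module most often misread, so a short sketch makes them concrete before the module's own Examples section; it assumes the module's euler_matrix and quaternion_about_axis, defined later in the file, are in scope.

    import numpy

    # Homogeneous points: columns go through dot(M, v); rows through dot(v, M.T).
    M = numpy.identity(4)
    M[:3, 3] = (1.0, 2.0, 3.0)                       # translate by (1, 2, 3)
    col = numpy.array([[0.0], [0.0], [0.0], [1.0]])  # shape (4, 1) "point of arrays"
    row = numpy.array([[0.0, 0.0, 0.0, 1.0]])        # shape (1, 4) "array of points"
    assert numpy.allclose(numpy.dot(M, col).T, numpy.dot(row, M.T))

    # Axes 4-strings: a static-frame sequence equals the reversed
    # rotating-frame sequence with the angles reversed.
    a, b, c = 0.1, 0.2, 0.3
    assert numpy.allclose(euler_matrix(a, b, c, 'sxyz'),
                          euler_matrix(c, b, a, 'rzyx'))

    # Quaternions are scalar-last, [x, y, z, w]; a zero rotation is [0, 0, 0, 1].
    assert numpy.allclose(quaternion_about_axis(0.0, (1, 0, 0)), [0, 0, 0, 1])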
- - -Examples --------- - ->>> alpha, beta, gamma = 0.123, -1.234, 2.345 ->>> origin, xaxis, yaxis, zaxis = (0, 0, 0), (1, 0, 0), (0, 1, 0), (0, 0, 1) ->>> I = identity_matrix() ->>> Rx = rotation_matrix(alpha, xaxis) ->>> Ry = rotation_matrix(beta, yaxis) ->>> Rz = rotation_matrix(gamma, zaxis) ->>> R = concatenate_matrices(Rx, Ry, Rz) ->>> euler = euler_from_matrix(R, 'rxyz') ->>> numpy.allclose([alpha, beta, gamma], euler) -True ->>> Re = euler_matrix(alpha, beta, gamma, 'rxyz') ->>> is_same_transform(R, Re) -True ->>> al, be, ga = euler_from_matrix(Re, 'rxyz') ->>> is_same_transform(Re, euler_matrix(al, be, ga, 'rxyz')) -True ->>> qx = quaternion_about_axis(alpha, xaxis) ->>> qy = quaternion_about_axis(beta, yaxis) ->>> qz = quaternion_about_axis(gamma, zaxis) ->>> q = quaternion_multiply(qx, qy) ->>> q = quaternion_multiply(q, qz) ->>> Rq = quaternion_matrix(q) ->>> is_same_transform(R, Rq) -True ->>> S = scale_matrix(1.23, origin) ->>> T = translation_matrix((1, 2, 3)) ->>> Z = shear_matrix(beta, xaxis, origin, zaxis) ->>> R = random_rotation_matrix(numpy.random.rand(3)) ->>> M = concatenate_matrices(T, R, Z, S) ->>> scale, shear, angles, trans, persp = decompose_matrix(M) ->>> numpy.allclose(scale, 1.23) -True ->>> numpy.allclose(trans, (1, 2, 3)) -True ->>> numpy.allclose(shear, (0, math.tan(beta), 0)) -True ->>> is_same_transform(R, euler_matrix(axes='sxyz', *angles)) -True ->>> M1 = compose_matrix(scale, shear, angles, trans, persp) ->>> is_same_transform(M, M1) -True - -""" - -from __future__ import division - -import warnings -import math - -import numpy - -# Documentation in HTML format can be generated with Epydoc -__docformat__ = "restructuredtext en" - - -def identity_matrix(): - """Return 4x4 identity/unit matrix. - - >>> I = identity_matrix() - >>> numpy.allclose(I, numpy.dot(I, I)) - True - >>> numpy.sum(I), numpy.trace(I) - (4.0, 4.0) - >>> numpy.allclose(I, numpy.identity(4, dtype=numpy.float64)) - True - - """ - return numpy.identity(4, dtype=numpy.float64) - - -def translation_matrix(direction): - """Return matrix to translate by direction vector. - - >>> v = numpy.random.random(3) - 0.5 - >>> numpy.allclose(v, translation_matrix(v)[:3, 3]) - True - - """ - M = numpy.identity(4) - M[:3, 3] = direction[:3] - return M - - -def translation_from_matrix(matrix): - """Return translation vector from translation matrix. - - >>> v0 = numpy.random.random(3) - 0.5 - >>> v1 = translation_from_matrix(translation_matrix(v0)) - >>> numpy.allclose(v0, v1) - True - - """ - return numpy.array(matrix, copy=False)[:3, 3].copy() - - -def reflection_matrix(point, normal): - """Return matrix to mirror at plane defined by point and normal vector. - - >>> v0 = numpy.random.random(4) - 0.5 - >>> v0[3] = 1.0 - >>> v1 = numpy.random.random(3) - 0.5 - >>> R = reflection_matrix(v0, v1) - >>> numpy.allclose(2., numpy.trace(R)) - True - >>> numpy.allclose(v0, numpy.dot(R, v0)) - True - >>> v2 = v0.copy() - >>> v2[:3] += v1 - >>> v3 = v0.copy() - >>> v2[:3] -= v1 - >>> numpy.allclose(v2, numpy.dot(R, v3)) - True - - """ - normal = unit_vector(normal[:3]) - M = numpy.identity(4) - M[:3, :3] -= 2.0 * numpy.outer(normal, normal) - M[:3, 3] = (2.0 * numpy.dot(point[:3], normal)) * normal - return M - - -def reflection_from_matrix(matrix): - """Return mirror plane point and normal vector from reflection matrix. 
- - >>> v0 = numpy.random.random(3) - 0.5 - >>> v1 = numpy.random.random(3) - 0.5 - >>> M0 = reflection_matrix(v0, v1) - >>> point, normal = reflection_from_matrix(M0) - >>> M1 = reflection_matrix(point, normal) - >>> is_same_transform(M0, M1) - True - - """ - M = numpy.array(matrix, dtype=numpy.float64, copy=False) - # normal: unit eigenvector corresponding to eigenvalue -1 - l, V = numpy.linalg.eig(M[:3, :3]) - i = numpy.where(abs(numpy.real(l) + 1.0) < 1e-8)[0] - if not len(i): - raise ValueError("no unit eigenvector corresponding to eigenvalue -1") - normal = numpy.real(V[:, i[0]]).squeeze() - # point: any unit eigenvector corresponding to eigenvalue 1 - l, V = numpy.linalg.eig(M) - i = numpy.where(abs(numpy.real(l) - 1.0) < 1e-8)[0] - if not len(i): - raise ValueError("no unit eigenvector corresponding to eigenvalue 1") - point = numpy.real(V[:, i[-1]]).squeeze() - point /= point[3] - return point, normal - - -def rotation_matrix(angle, direction, point=None): - """Return matrix to rotate about axis defined by point and direction. - - >>> angle = (random.random() - 0.5) * (2*math.pi) - >>> direc = numpy.random.random(3) - 0.5 - >>> point = numpy.random.random(3) - 0.5 - >>> R0 = rotation_matrix(angle, direc, point) - >>> R1 = rotation_matrix(angle-2*math.pi, direc, point) - >>> is_same_transform(R0, R1) - True - >>> R0 = rotation_matrix(angle, direc, point) - >>> R1 = rotation_matrix(-angle, -direc, point) - >>> is_same_transform(R0, R1) - True - >>> I = numpy.identity(4, numpy.float64) - >>> numpy.allclose(I, rotation_matrix(math.pi*2, direc)) - True - >>> numpy.allclose(2., numpy.trace(rotation_matrix(math.pi/2, - ... direc, point))) - True - - """ - sina = math.sin(angle) - cosa = math.cos(angle) - direction = unit_vector(direction[:3]) - # rotation matrix around unit vector - R = numpy.array(((cosa, 0.0, 0.0), - (0.0, cosa, 0.0), - (0.0, 0.0, cosa)), dtype=numpy.float64) - R += numpy.outer(direction, direction) * (1.0 - cosa) - direction *= sina - R += numpy.array((( 0.0, -direction[2], direction[1]), - ( direction[2], 0.0, -direction[0]), - (-direction[1], direction[0], 0.0)), - dtype=numpy.float64) - M = numpy.identity(4) - M[:3, :3] = R - if point is not None: - # rotation not around origin - point = numpy.array(point[:3], dtype=numpy.float64, copy=False) - M[:3, 3] = point - numpy.dot(R, point) - return M - - -def rotation_from_matrix(matrix): - """Return rotation angle and axis from rotation matrix. 
- - >>> angle = (random.random() - 0.5) * (2*math.pi) - >>> direc = numpy.random.random(3) - 0.5 - >>> point = numpy.random.random(3) - 0.5 - >>> R0 = rotation_matrix(angle, direc, point) - >>> angle, direc, point = rotation_from_matrix(R0) - >>> R1 = rotation_matrix(angle, direc, point) - >>> is_same_transform(R0, R1) - True - - """ - R = numpy.array(matrix, dtype=numpy.float64, copy=False) - R33 = R[:3, :3] - # direction: unit eigenvector of R33 corresponding to eigenvalue of 1 - l, W = numpy.linalg.eig(R33.T) - i = numpy.where(abs(numpy.real(l) - 1.0) < 1e-8)[0] - if not len(i): - raise ValueError("no unit eigenvector corresponding to eigenvalue 1") - direction = numpy.real(W[:, i[-1]]).squeeze() - # point: unit eigenvector of R33 corresponding to eigenvalue of 1 - l, Q = numpy.linalg.eig(R) - i = numpy.where(abs(numpy.real(l) - 1.0) < 1e-8)[0] - if not len(i): - raise ValueError("no unit eigenvector corresponding to eigenvalue 1") - point = numpy.real(Q[:, i[-1]]).squeeze() - point /= point[3] - # rotation angle depending on direction - cosa = (numpy.trace(R33) - 1.0) / 2.0 - if abs(direction[2]) > 1e-8: - sina = (R[1, 0] + (cosa-1.0)*direction[0]*direction[1]) / direction[2] - elif abs(direction[1]) > 1e-8: - sina = (R[0, 2] + (cosa-1.0)*direction[0]*direction[2]) / direction[1] - else: - sina = (R[2, 1] + (cosa-1.0)*direction[1]*direction[2]) / direction[0] - angle = math.atan2(sina, cosa) - return angle, direction, point - - -def scale_matrix(factor, origin=None, direction=None): - """Return matrix to scale by factor around origin in direction. - - Use factor -1 for point symmetry. - - >>> v = (numpy.random.rand(4, 5) - 0.5) * 20.0 - >>> v[3] = 1.0 - >>> S = scale_matrix(-1.234) - >>> numpy.allclose(numpy.dot(S, v)[:3], -1.234*v[:3]) - True - >>> factor = random.random() * 10 - 5 - >>> origin = numpy.random.random(3) - 0.5 - >>> direct = numpy.random.random(3) - 0.5 - >>> S = scale_matrix(factor, origin) - >>> S = scale_matrix(factor, origin, direct) - - """ - if direction is None: - # uniform scaling - M = numpy.array(((factor, 0.0, 0.0, 0.0), - (0.0, factor, 0.0, 0.0), - (0.0, 0.0, factor, 0.0), - (0.0, 0.0, 0.0, 1.0)), dtype=numpy.float64) - if origin is not None: - M[:3, 3] = origin[:3] - M[:3, 3] *= 1.0 - factor - else: - # nonuniform scaling - direction = unit_vector(direction[:3]) - factor = 1.0 - factor - M = numpy.identity(4) - M[:3, :3] -= factor * numpy.outer(direction, direction) - if origin is not None: - M[:3, 3] = (factor * numpy.dot(origin[:3], direction)) * direction - return M - - -def scale_from_matrix(matrix): - """Return scaling factor, origin and direction from scaling matrix. 
- - >>> factor = random.random() * 10 - 5 - >>> origin = numpy.random.random(3) - 0.5 - >>> direct = numpy.random.random(3) - 0.5 - >>> S0 = scale_matrix(factor, origin) - >>> factor, origin, direction = scale_from_matrix(S0) - >>> S1 = scale_matrix(factor, origin, direction) - >>> is_same_transform(S0, S1) - True - >>> S0 = scale_matrix(factor, origin, direct) - >>> factor, origin, direction = scale_from_matrix(S0) - >>> S1 = scale_matrix(factor, origin, direction) - >>> is_same_transform(S0, S1) - True - - """ - M = numpy.array(matrix, dtype=numpy.float64, copy=False) - M33 = M[:3, :3] - factor = numpy.trace(M33) - 2.0 - try: - # direction: unit eigenvector corresponding to eigenvalue factor - l, V = numpy.linalg.eig(M33) - i = numpy.where(abs(numpy.real(l) - factor) < 1e-8)[0][0] - direction = numpy.real(V[:, i]).squeeze() - direction /= vector_norm(direction) - except IndexError: - # uniform scaling - factor = (factor + 2.0) / 3.0 - direction = None - # origin: any eigenvector corresponding to eigenvalue 1 - l, V = numpy.linalg.eig(M) - i = numpy.where(abs(numpy.real(l) - 1.0) < 1e-8)[0] - if not len(i): - raise ValueError("no eigenvector corresponding to eigenvalue 1") - origin = numpy.real(V[:, i[-1]]).squeeze() - origin /= origin[3] - return factor, origin, direction - - -def projection_matrix(point, normal, direction=None, - perspective=None, pseudo=False): - """Return matrix to project onto plane defined by point and normal. - - Using either perspective point, projection direction, or none of both. - - If pseudo is True, perspective projections will preserve relative depth - such that Perspective = dot(Orthogonal, PseudoPerspective). - - >>> P = projection_matrix((0, 0, 0), (1, 0, 0)) - >>> numpy.allclose(P[1:, 1:], numpy.identity(4)[1:, 1:]) - True - >>> point = numpy.random.random(3) - 0.5 - >>> normal = numpy.random.random(3) - 0.5 - >>> direct = numpy.random.random(3) - 0.5 - >>> persp = numpy.random.random(3) - 0.5 - >>> P0 = projection_matrix(point, normal) - >>> P1 = projection_matrix(point, normal, direction=direct) - >>> P2 = projection_matrix(point, normal, perspective=persp) - >>> P3 = projection_matrix(point, normal, perspective=persp, pseudo=True) - >>> is_same_transform(P2, numpy.dot(P0, P3)) - True - >>> P = projection_matrix((3, 0, 0), (1, 1, 0), (1, 0, 0)) - >>> v0 = (numpy.random.rand(4, 5) - 0.5) * 20.0 - >>> v0[3] = 1.0 - >>> v1 = numpy.dot(P, v0) - >>> numpy.allclose(v1[1], v0[1]) - True - >>> numpy.allclose(v1[0], 3.0-v1[1]) - True - - """ - M = numpy.identity(4) - point = numpy.array(point[:3], dtype=numpy.float64, copy=False) - normal = unit_vector(normal[:3]) - if perspective is not None: - # perspective projection - perspective = numpy.array(perspective[:3], dtype=numpy.float64, - copy=False) - M[0, 0] = M[1, 1] = M[2, 2] = numpy.dot(perspective-point, normal) - M[:3, :3] -= numpy.outer(perspective, normal) - if pseudo: - # preserve relative depth - M[:3, :3] -= numpy.outer(normal, normal) - M[:3, 3] = numpy.dot(point, normal) * (perspective+normal) - else: - M[:3, 3] = numpy.dot(point, normal) * perspective - M[3, :3] = -normal - M[3, 3] = numpy.dot(perspective, normal) - elif direction is not None: - # parallel projection - direction = numpy.array(direction[:3], dtype=numpy.float64, copy=False) - scale = numpy.dot(direction, normal) - M[:3, :3] -= numpy.outer(direction, normal) / scale - M[:3, 3] = direction * (numpy.dot(point, normal) / scale) - else: - # orthogonal projection - M[:3, :3] -= numpy.outer(normal, normal) - M[:3, 3] = numpy.dot(point, 
normal) * normal - return M - - -def projection_from_matrix(matrix, pseudo=False): - """Return projection plane and perspective point from projection matrix. - - Return values are same as arguments for projection_matrix function: - point, normal, direction, perspective, and pseudo. - - >>> point = numpy.random.random(3) - 0.5 - >>> normal = numpy.random.random(3) - 0.5 - >>> direct = numpy.random.random(3) - 0.5 - >>> persp = numpy.random.random(3) - 0.5 - >>> P0 = projection_matrix(point, normal) - >>> result = projection_from_matrix(P0) - >>> P1 = projection_matrix(*result) - >>> is_same_transform(P0, P1) - True - >>> P0 = projection_matrix(point, normal, direct) - >>> result = projection_from_matrix(P0) - >>> P1 = projection_matrix(*result) - >>> is_same_transform(P0, P1) - True - >>> P0 = projection_matrix(point, normal, perspective=persp, pseudo=False) - >>> result = projection_from_matrix(P0, pseudo=False) - >>> P1 = projection_matrix(*result) - >>> is_same_transform(P0, P1) - True - >>> P0 = projection_matrix(point, normal, perspective=persp, pseudo=True) - >>> result = projection_from_matrix(P0, pseudo=True) - >>> P1 = projection_matrix(*result) - >>> is_same_transform(P0, P1) - True - - """ - M = numpy.array(matrix, dtype=numpy.float64, copy=False) - M33 = M[:3, :3] - l, V = numpy.linalg.eig(M) - i = numpy.where(abs(numpy.real(l) - 1.0) < 1e-8)[0] - if not pseudo and len(i): - # point: any eigenvector corresponding to eigenvalue 1 - point = numpy.real(V[:, i[-1]]).squeeze() - point /= point[3] - # direction: unit eigenvector corresponding to eigenvalue 0 - l, V = numpy.linalg.eig(M33) - i = numpy.where(abs(numpy.real(l)) < 1e-8)[0] - if not len(i): - raise ValueError("no eigenvector corresponding to eigenvalue 0") - direction = numpy.real(V[:, i[0]]).squeeze() - direction /= vector_norm(direction) - # normal: unit eigenvector of M33.T corresponding to eigenvalue 0 - l, V = numpy.linalg.eig(M33.T) - i = numpy.where(abs(numpy.real(l)) < 1e-8)[0] - if len(i): - # parallel projection - normal = numpy.real(V[:, i[0]]).squeeze() - normal /= vector_norm(normal) - return point, normal, direction, None, False - else: - # orthogonal projection, where normal equals direction vector - return point, direction, None, None, False - else: - # perspective projection - i = numpy.where(abs(numpy.real(l)) > 1e-8)[0] - if not len(i): - raise ValueError( - "no eigenvector not corresponding to eigenvalue 0") - point = numpy.real(V[:, i[-1]]).squeeze() - point /= point[3] - normal = - M[3, :3] - perspective = M[:3, 3] / numpy.dot(point[:3], normal) - if pseudo: - perspective -= normal - return point, normal, None, perspective, pseudo - - -def clip_matrix(left, right, bottom, top, near, far, perspective=False): - """Return matrix to obtain normalized device coordinates from frustrum. - - The frustrum bounds are axis-aligned along x (left, right), - y (bottom, top) and z (near, far). - - Normalized device coordinates are in range [-1, 1] if coordinates are - inside the frustrum. - - If perspective is True the frustrum is a truncated pyramid with the - perspective point at origin and direction along z axis, otherwise an - orthographic canonical view volume (a box). - - Homogeneous coordinates transformed by the perspective clip matrix - need to be dehomogenized (devided by w coordinate). 
- - >>> frustrum = numpy.random.rand(6) - >>> frustrum[1] += frustrum[0] - >>> frustrum[3] += frustrum[2] - >>> frustrum[5] += frustrum[4] - >>> M = clip_matrix(*frustrum, perspective=False) - >>> numpy.dot(M, [frustrum[0], frustrum[2], frustrum[4], 1.0]) - array([-1., -1., -1., 1.]) - >>> numpy.dot(M, [frustrum[1], frustrum[3], frustrum[5], 1.0]) - array([ 1., 1., 1., 1.]) - >>> M = clip_matrix(*frustrum, perspective=True) - >>> v = numpy.dot(M, [frustrum[0], frustrum[2], frustrum[4], 1.0]) - >>> v / v[3] - array([-1., -1., -1., 1.]) - >>> v = numpy.dot(M, [frustrum[1], frustrum[3], frustrum[4], 1.0]) - >>> v / v[3] - array([ 1., 1., -1., 1.]) - - """ - if left >= right or bottom >= top or near >= far: - raise ValueError("invalid frustrum") - if perspective: - if near <= _EPS: - raise ValueError("invalid frustrum: near <= 0") - t = 2.0 * near - M = ((-t/(right-left), 0.0, (right+left)/(right-left), 0.0), - (0.0, -t/(top-bottom), (top+bottom)/(top-bottom), 0.0), - (0.0, 0.0, -(far+near)/(far-near), t*far/(far-near)), - (0.0, 0.0, -1.0, 0.0)) - else: - M = ((2.0/(right-left), 0.0, 0.0, (right+left)/(left-right)), - (0.0, 2.0/(top-bottom), 0.0, (top+bottom)/(bottom-top)), - (0.0, 0.0, 2.0/(far-near), (far+near)/(near-far)), - (0.0, 0.0, 0.0, 1.0)) - return numpy.array(M, dtype=numpy.float64) - - -def shear_matrix(angle, direction, point, normal): - """Return matrix to shear by angle along direction vector on shear plane. - - The shear plane is defined by a point and normal vector. The direction - vector must be orthogonal to the plane's normal vector. - - A point P is transformed by the shear matrix into P" such that - the vector P-P" is parallel to the direction vector and its extent is - given by the angle of P-P'-P", where P' is the orthogonal projection - of P onto the shear plane. - - >>> angle = (random.random() - 0.5) * 4*math.pi - >>> direct = numpy.random.random(3) - 0.5 - >>> point = numpy.random.random(3) - 0.5 - >>> normal = numpy.cross(direct, numpy.random.random(3)) - >>> S = shear_matrix(angle, direct, point, normal) - >>> numpy.allclose(1.0, numpy.linalg.det(S)) - True - - """ - normal = unit_vector(normal[:3]) - direction = unit_vector(direction[:3]) - if abs(numpy.dot(normal, direction)) > 1e-6: - raise ValueError("direction and normal vectors are not orthogonal") - angle = math.tan(angle) - M = numpy.identity(4) - M[:3, :3] += angle * numpy.outer(direction, normal) - M[:3, 3] = -angle * numpy.dot(point[:3], normal) * direction - return M - - -def shear_from_matrix(matrix): - """Return shear angle, direction and plane from shear matrix. 
- - >>> angle = (random.random() - 0.5) * 4*math.pi - >>> direct = numpy.random.random(3) - 0.5 - >>> point = numpy.random.random(3) - 0.5 - >>> normal = numpy.cross(direct, numpy.random.random(3)) - >>> S0 = shear_matrix(angle, direct, point, normal) - >>> angle, direct, point, normal = shear_from_matrix(S0) - >>> S1 = shear_matrix(angle, direct, point, normal) - >>> is_same_transform(S0, S1) - True - - """ - M = numpy.array(matrix, dtype=numpy.float64, copy=False) - M33 = M[:3, :3] - # normal: cross independent eigenvectors corresponding to the eigenvalue 1 - l, V = numpy.linalg.eig(M33) - i = numpy.where(abs(numpy.real(l) - 1.0) < 1e-4)[0] - if len(i) < 2: - raise ValueError("No two linear independent eigenvectors found {}".format(l)) - V = numpy.real(V[:, i]).squeeze().T - lenorm = -1.0 - for i0, i1 in ((0, 1), (0, 2), (1, 2)): - n = numpy.cross(V[i0], V[i1]) - l = vector_norm(n) - if l > lenorm: - lenorm = l - normal = n - normal /= lenorm - # direction and angle - direction = numpy.dot(M33 - numpy.identity(3), normal) - angle = vector_norm(direction) - direction /= angle - angle = math.atan(angle) - # point: eigenvector corresponding to eigenvalue 1 - l, V = numpy.linalg.eig(M) - i = numpy.where(abs(numpy.real(l) - 1.0) < 1e-8)[0] - if not len(i): - raise ValueError("no eigenvector corresponding to eigenvalue 1") - point = numpy.real(V[:, i[-1]]).squeeze() - point /= point[3] - return angle, direction, point, normal - - -def decompose_matrix(matrix): - """Return sequence of transformations from transformation matrix. - - matrix : array_like - Non-degenerative homogeneous transformation matrix - - Return tuple of: - scale : vector of 3 scaling factors - shear : list of shear factors for x-y, x-z, y-z axes - angles : list of Euler angles about static x, y, z axes - translate : translation vector along x, y, z axes - perspective : perspective partition of matrix - - Raise ValueError if matrix is of wrong type or degenerative. 
- - >>> T0 = translation_matrix((1, 2, 3)) - >>> scale, shear, angles, trans, persp = decompose_matrix(T0) - >>> T1 = translation_matrix(trans) - >>> numpy.allclose(T0, T1) - True - >>> S = scale_matrix(0.123) - >>> scale, shear, angles, trans, persp = decompose_matrix(S) - >>> scale[0] - 0.123 - >>> R0 = euler_matrix(1, 2, 3) - >>> scale, shear, angles, trans, persp = decompose_matrix(R0) - >>> R1 = euler_matrix(*angles) - >>> numpy.allclose(R0, R1) - True - - """ - M = numpy.array(matrix, dtype=numpy.float64, copy=True).T - if abs(M[3, 3]) < _EPS: - raise ValueError("M[3, 3] is zero") - M /= M[3, 3] - P = M.copy() - P[:, 3] = 0, 0, 0, 1 - if not numpy.linalg.det(P): - raise ValueError("Matrix is singular") - - scale = numpy.zeros((3, ), dtype=numpy.float64) - shear = [0, 0, 0] - angles = [0, 0, 0] - - if any(abs(M[:3, 3]) > _EPS): - perspective = numpy.dot(M[:, 3], numpy.linalg.inv(P.T)) - M[:, 3] = 0, 0, 0, 1 - else: - perspective = numpy.array((0, 0, 0, 1), dtype=numpy.float64) - - translate = M[3, :3].copy() - M[3, :3] = 0 - - row = M[:3, :3].copy() - scale[0] = vector_norm(row[0]) - row[0] /= scale[0] - shear[0] = numpy.dot(row[0], row[1]) - row[1] -= row[0] * shear[0] - scale[1] = vector_norm(row[1]) - row[1] /= scale[1] - shear[0] /= scale[1] - shear[1] = numpy.dot(row[0], row[2]) - row[2] -= row[0] * shear[1] - shear[2] = numpy.dot(row[1], row[2]) - row[2] -= row[1] * shear[2] - scale[2] = vector_norm(row[2]) - row[2] /= scale[2] - shear[1:] /= scale[2] - - if numpy.dot(row[0], numpy.cross(row[1], row[2])) < 0: - scale *= -1 - row *= -1 - - angles[1] = math.asin(-row[0, 2]) - if math.cos(angles[1]): - angles[0] = math.atan2(row[1, 2], row[2, 2]) - angles[2] = math.atan2(row[0, 1], row[0, 0]) - else: - #angles[0] = math.atan2(row[1, 0], row[1, 1]) - angles[0] = math.atan2(-row[2, 1], row[1, 1]) - angles[2] = 0.0 - - return scale, shear, angles, translate, perspective - - -def compose_matrix(scale=None, shear=None, angles=None, translate=None, - perspective=None): - """Return transformation matrix from sequence of transformations. - - This is the inverse of the decompose_matrix function. 
- - Sequence of transformations: - scale : vector of 3 scaling factors - shear : list of shear factors for x-y, x-z, y-z axes - angles : list of Euler angles about static x, y, z axes - translate : translation vector along x, y, z axes - perspective : perspective partition of matrix - - >>> scale = numpy.random.random(3) - 0.5 - >>> shear = numpy.random.random(3) - 0.5 - >>> angles = (numpy.random.random(3) - 0.5) * (2*math.pi) - >>> trans = numpy.random.random(3) - 0.5 - >>> persp = numpy.random.random(4) - 0.5 - >>> M0 = compose_matrix(scale, shear, angles, trans, persp) - >>> result = decompose_matrix(M0) - >>> M1 = compose_matrix(*result) - >>> is_same_transform(M0, M1) - True - - """ - M = numpy.identity(4) - if perspective is not None: - P = numpy.identity(4) - P[3, :] = perspective[:4] - M = numpy.dot(M, P) - if translate is not None: - T = numpy.identity(4) - T[:3, 3] = translate[:3] - M = numpy.dot(M, T) - if angles is not None: - R = euler_matrix(angles[0], angles[1], angles[2], 'sxyz') - M = numpy.dot(M, R) - if shear is not None: - Z = numpy.identity(4) - Z[1, 2] = shear[2] - Z[0, 2] = shear[1] - Z[0, 1] = shear[0] - M = numpy.dot(M, Z) - if scale is not None: - S = numpy.identity(4) - S[0, 0] = scale[0] - S[1, 1] = scale[1] - S[2, 2] = scale[2] - M = numpy.dot(M, S) - M /= M[3, 3] - return M - - -def orthogonalization_matrix(lengths, angles): - """Return orthogonalization matrix for crystallographic cell coordinates. - - Angles are expected in degrees. - - The de-orthogonalization matrix is the inverse. - - >>> O = orthogonalization_matrix((10., 10., 10.), (90., 90., 90.)) - >>> numpy.allclose(O[:3, :3], numpy.identity(3, float) * 10) - True - >>> O = orthogonalization_matrix([9.8, 12.0, 15.5], [87.2, 80.7, 69.7]) - >>> numpy.allclose(numpy.sum(O), 43.063229) - True - - """ - a, b, c = lengths - angles = numpy.radians(angles) - sina, sinb, _ = numpy.sin(angles) - cosa, cosb, cosg = numpy.cos(angles) - co = (cosa * cosb - cosg) / (sina * sinb) - return numpy.array(( - ( a*sinb*math.sqrt(1.0-co*co), 0.0, 0.0, 0.0), - (-a*sinb*co, b*sina, 0.0, 0.0), - ( a*cosb, b*cosa, c, 0.0), - ( 0.0, 0.0, 0.0, 1.0)), - dtype=numpy.float64) - - -def superimposition_matrix(v0, v1, scaling=False, usesvd=True): - """Return matrix to transform given vector set into second vector set. - - v0 and v1 are shape (3, \*) or (4, \*) arrays of at least 3 vectors. - - If usesvd is True, the weighted sum of squared deviations (RMSD) is - minimized according to the algorithm by W. Kabsch [8]. Otherwise the - quaternion based algorithm by B. Horn [9] is used (slower when using - this Python implementation). - - The returned matrix performs rotation, translation and uniform scaling - (if specified). 
- - >>> v0 = numpy.random.rand(3, 10) - >>> M = superimposition_matrix(v0, v0) - >>> numpy.allclose(M, numpy.identity(4)) - True - >>> R = random_rotation_matrix(numpy.random.random(3)) - >>> v0 = ((1,0,0), (0,1,0), (0,0,1), (1,1,1)) - >>> v1 = numpy.dot(R, v0) - >>> M = superimposition_matrix(v0, v1) - >>> numpy.allclose(v1, numpy.dot(M, v0)) - True - >>> v0 = (numpy.random.rand(4, 100) - 0.5) * 20.0 - >>> v0[3] = 1.0 - >>> v1 = numpy.dot(R, v0) - >>> M = superimposition_matrix(v0, v1) - >>> numpy.allclose(v1, numpy.dot(M, v0)) - True - >>> S = scale_matrix(random.random()) - >>> T = translation_matrix(numpy.random.random(3)-0.5) - >>> M = concatenate_matrices(T, R, S) - >>> v1 = numpy.dot(M, v0) - >>> v0[:3] += numpy.random.normal(0.0, 1e-9, 300).reshape(3, -1) - >>> M = superimposition_matrix(v0, v1, scaling=True) - >>> numpy.allclose(v1, numpy.dot(M, v0)) - True - >>> M = superimposition_matrix(v0, v1, scaling=True, usesvd=False) - >>> numpy.allclose(v1, numpy.dot(M, v0)) - True - >>> v = numpy.empty((4, 100, 3), dtype=numpy.float64) - >>> v[:, :, 0] = v0 - >>> M = superimposition_matrix(v0, v1, scaling=True, usesvd=False) - >>> numpy.allclose(v1, numpy.dot(M, v[:, :, 0])) - True - - """ - v0 = numpy.array(v0, dtype=numpy.float64, copy=False)[:3] - v1 = numpy.array(v1, dtype=numpy.float64, copy=False)[:3] - - if v0.shape != v1.shape or v0.shape[1] < 3: - raise ValueError("Vector sets are of wrong shape or type.") - - # move centroids to origin - t0 = numpy.mean(v0, axis=1) - t1 = numpy.mean(v1, axis=1) - v0 = v0 - t0.reshape(3, 1) - v1 = v1 - t1.reshape(3, 1) - - if usesvd: - # Singular Value Decomposition of covariance matrix - u, s, vh = numpy.linalg.svd(numpy.dot(v1, v0.T)) - # rotation matrix from SVD orthonormal bases - R = numpy.dot(u, vh) - if numpy.linalg.det(R) < 0.0: - # R does not constitute right handed system - R -= numpy.outer(u[:, 2], vh[2, :]*2.0) - s[-1] *= -1.0 - # homogeneous transformation matrix - M = numpy.identity(4) - M[:3, :3] = R - else: - # compute symmetric matrix N - xx, yy, zz = numpy.sum(v0 * v1, axis=1) - xy, yz, zx = numpy.sum(v0 * numpy.roll(v1, -1, axis=0), axis=1) - xz, yx, zy = numpy.sum(v0 * numpy.roll(v1, -2, axis=0), axis=1) - N = ((xx+yy+zz, yz-zy, zx-xz, xy-yx), - (yz-zy, xx-yy-zz, xy+yx, zx+xz), - (zx-xz, xy+yx, -xx+yy-zz, yz+zy), - (xy-yx, zx+xz, yz+zy, -xx-yy+zz)) - # quaternion: eigenvector corresponding to most positive eigenvalue - l, V = numpy.linalg.eig(N) - q = V[:, numpy.argmax(l)] - q /= vector_norm(q) # unit quaternion - q = numpy.roll(q, -1) # move w component to end - # homogeneous transformation matrix - M = quaternion_matrix(q) - - # scale: ratio of rms deviations from centroid - if scaling: - v0 *= v0 - v1 *= v1 - M[:3, :3] *= math.sqrt(numpy.sum(v1) / numpy.sum(v0)) - - # translation - M[:3, 3] = t1 - T = numpy.identity(4) - T[:3, 3] = -t0 - M = numpy.dot(M, T) - return M - - -def euler_matrix(ai, aj, ak, axes='sxyz'): - """Return homogeneous rotation matrix from Euler angles and axis sequence. - - ai, aj, ak : Euler's roll, pitch and yaw angles - axes : One of 24 axis sequences as string or encoded tuple - - >>> R = euler_matrix(1, 2, 3, 'syxz') - >>> numpy.allclose(numpy.sum(R[0]), -1.34786452) - True - >>> R = euler_matrix(1, 2, 3, (0, 1, 0, 1)) - >>> numpy.allclose(numpy.sum(R[0]), -0.383436184) - True - >>> ai, aj, ak = (4.0*math.pi) * (numpy.random.random(3) - 0.5) - >>> for axes in _AXES2TUPLE.keys(): - ... R = euler_matrix(ai, aj, ak, axes) - >>> for axes in _TUPLE2AXES.keys(): - ... 
R = euler_matrix(ai, aj, ak, axes) - - """ - try: - firstaxis, parity, repetition, frame = _AXES2TUPLE[axes] - except (AttributeError, KeyError): - _ = _TUPLE2AXES[axes] - firstaxis, parity, repetition, frame = axes - - i = firstaxis - j = _NEXT_AXIS[i+parity] - k = _NEXT_AXIS[i-parity+1] - - if frame: - ai, ak = ak, ai - if parity: - ai, aj, ak = -ai, -aj, -ak - - si, sj, sk = math.sin(ai), math.sin(aj), math.sin(ak) - ci, cj, ck = math.cos(ai), math.cos(aj), math.cos(ak) - cc, cs = ci*ck, ci*sk - sc, ss = si*ck, si*sk - - M = numpy.identity(4) - if repetition: - M[i, i] = cj - M[i, j] = sj*si - M[i, k] = sj*ci - M[j, i] = sj*sk - M[j, j] = -cj*ss+cc - M[j, k] = -cj*cs-sc - M[k, i] = -sj*ck - M[k, j] = cj*sc+cs - M[k, k] = cj*cc-ss - else: - M[i, i] = cj*ck - M[i, j] = sj*sc-cs - M[i, k] = sj*cc+ss - M[j, i] = cj*sk - M[j, j] = sj*ss+cc - M[j, k] = sj*cs-sc - M[k, i] = -sj - M[k, j] = cj*si - M[k, k] = cj*ci - return M - - -def euler_from_matrix(matrix, axes='sxyz'): - """Return Euler angles from rotation matrix for specified axis sequence. - - axes : One of 24 axis sequences as string or encoded tuple - - Note that many Euler angle triplets can describe one matrix. - - >>> R0 = euler_matrix(1, 2, 3, 'syxz') - >>> al, be, ga = euler_from_matrix(R0, 'syxz') - >>> R1 = euler_matrix(al, be, ga, 'syxz') - >>> numpy.allclose(R0, R1) - True - >>> angles = (4.0*math.pi) * (numpy.random.random(3) - 0.5) - >>> for axes in _AXES2TUPLE.keys(): - ... R0 = euler_matrix(axes=axes, *angles) - ... R1 = euler_matrix(axes=axes, *euler_from_matrix(R0, axes)) - ... if not numpy.allclose(R0, R1): print axes, "failed" - - """ - try: - firstaxis, parity, repetition, frame = _AXES2TUPLE[axes.lower()] - except (AttributeError, KeyError): - _ = _TUPLE2AXES[axes] - firstaxis, parity, repetition, frame = axes - - i = firstaxis - j = _NEXT_AXIS[i+parity] - k = _NEXT_AXIS[i-parity+1] - - M = numpy.array(matrix, dtype=numpy.float64, copy=False)[:3, :3] - if repetition: - sy = math.sqrt(M[i, j]*M[i, j] + M[i, k]*M[i, k]) - if sy > _EPS: - ax = math.atan2( M[i, j], M[i, k]) - ay = math.atan2( sy, M[i, i]) - az = math.atan2( M[j, i], -M[k, i]) - else: - ax = math.atan2(-M[j, k], M[j, j]) - ay = math.atan2( sy, M[i, i]) - az = 0.0 - else: - cy = math.sqrt(M[i, i]*M[i, i] + M[j, i]*M[j, i]) - if cy > _EPS: - ax = math.atan2( M[k, j], M[k, k]) - ay = math.atan2(-M[k, i], cy) - az = math.atan2( M[j, i], M[i, i]) - else: - ax = math.atan2(-M[j, k], M[j, j]) - ay = math.atan2(-M[k, i], cy) - az = 0.0 - - if parity: - ax, ay, az = -ax, -ay, -az - if frame: - ax, az = az, ax - return ax, ay, az - - -def euler_from_quaternion(quaternion, axes='sxyz'): - """Return Euler angles from quaternion for specified axis sequence. - - >>> angles = euler_from_quaternion([0.06146124, 0, 0, 0.99810947]) - >>> numpy.allclose(angles, [0.123, 0, 0]) - True - - """ - return euler_from_matrix(quaternion_matrix(quaternion), axes) - - -def quaternion_from_euler(ai, aj, ak, axes='sxyz'): - """Return quaternion from Euler angles and axis sequence. 
- - ai, aj, ak : Euler's roll, pitch and yaw angles - axes : One of 24 axis sequences as string or encoded tuple - - >>> q = quaternion_from_euler(1, 2, 3, 'ryxz') - >>> numpy.allclose(q, [0.310622, -0.718287, 0.444435, 0.435953]) - True - - """ - try: - firstaxis, parity, repetition, frame = _AXES2TUPLE[axes.lower()] - except (AttributeError, KeyError): - _ = _TUPLE2AXES[axes] - firstaxis, parity, repetition, frame = axes - - i = firstaxis - j = _NEXT_AXIS[i+parity] - k = _NEXT_AXIS[i-parity+1] - - if frame: - ai, ak = ak, ai - if parity: - aj = -aj - - ai /= 2.0 - aj /= 2.0 - ak /= 2.0 - ci = math.cos(ai) - si = math.sin(ai) - cj = math.cos(aj) - sj = math.sin(aj) - ck = math.cos(ak) - sk = math.sin(ak) - cc = ci*ck - cs = ci*sk - sc = si*ck - ss = si*sk - - quaternion = numpy.empty((4, ), dtype=numpy.float64) - if repetition: - quaternion[i] = cj*(cs + sc) - quaternion[j] = sj*(cc + ss) - quaternion[k] = sj*(cs - sc) - quaternion[3] = cj*(cc - ss) - else: - quaternion[i] = cj*sc - sj*cs - quaternion[j] = cj*ss + sj*cc - quaternion[k] = cj*cs - sj*sc - quaternion[3] = cj*cc + sj*ss - if parity: - quaternion[j] *= -1 - - return quaternion - - -def quaternion_about_axis(angle, axis): - """Return quaternion for rotation about axis. - - >>> q = quaternion_about_axis(0.123, (1, 0, 0)) - >>> numpy.allclose(q, [0.06146124, 0, 0, 0.99810947]) - True - - """ - quaternion = numpy.zeros((4, ), dtype=numpy.float64) - quaternion[:3] = axis[:3] - qlen = vector_norm(quaternion) - if qlen > _EPS: - quaternion *= math.sin(angle/2.0) / qlen - quaternion[3] = math.cos(angle/2.0) - return quaternion - - -def quaternion_matrix(quaternion): - """Return homogeneous rotation matrix from quaternion. - - >>> R = quaternion_matrix([0.06146124, 0, 0, 0.99810947]) - >>> numpy.allclose(R, rotation_matrix(0.123, (1, 0, 0))) - True - - """ - q = numpy.array(quaternion[:4], dtype=numpy.float64, copy=True) - nq = numpy.dot(q, q) - if nq < _EPS: - return numpy.identity(4) - q *= math.sqrt(2.0 / nq) - q = numpy.outer(q, q) - return numpy.array(( - (1.0-q[1, 1]-q[2, 2], q[0, 1]-q[2, 3], q[0, 2]+q[1, 3], 0.0), - ( q[0, 1]+q[2, 3], 1.0-q[0, 0]-q[2, 2], q[1, 2]-q[0, 3], 0.0), - ( q[0, 2]-q[1, 3], q[1, 2]+q[0, 3], 1.0-q[0, 0]-q[1, 1], 0.0), - ( 0.0, 0.0, 0.0, 1.0) - ), dtype=numpy.float64) - - -def quaternion_from_matrix(matrix): - """Return quaternion from rotation matrix. - - >>> R = rotation_matrix(0.123, (1, 2, 3)) - >>> q = quaternion_from_matrix(R) - >>> numpy.allclose(q, [0.0164262, 0.0328524, 0.0492786, 0.9981095]) - True - - """ - q = numpy.empty((4, ), dtype=numpy.float64) - M = numpy.array(matrix, dtype=numpy.float64, copy=False)[:4, :4] - t = numpy.trace(M) - if t > M[3, 3]: - q[3] = t - q[2] = M[1, 0] - M[0, 1] - q[1] = M[0, 2] - M[2, 0] - q[0] = M[2, 1] - M[1, 2] - else: - i, j, k = 0, 1, 2 - if M[1, 1] > M[0, 0]: - i, j, k = 1, 2, 0 - if M[2, 2] > M[i, i]: - i, j, k = 2, 0, 1 - t = M[i, i] - (M[j, j] + M[k, k]) + M[3, 3] - q[i] = t - q[j] = M[i, j] + M[j, i] - q[k] = M[k, i] + M[i, k] - q[3] = M[k, j] - M[j, k] - q *= 0.5 / math.sqrt(t * M[3, 3]) - return q - - -def quaternion_multiply(quaternion1, quaternion0): - """Return multiplication of two quaternions. 
- - >>> q = quaternion_multiply([1, -2, 3, 4], [-5, 6, 7, 8]) - >>> numpy.allclose(q, [-44, -14, 48, 28]) - True - - """ - x0, y0, z0, w0 = quaternion0 - x1, y1, z1, w1 = quaternion1 - return numpy.array(( - x1*w0 + y1*z0 - z1*y0 + w1*x0, - -x1*z0 + y1*w0 + z1*x0 + w1*y0, - x1*y0 - y1*x0 + z1*w0 + w1*z0, - -x1*x0 - y1*y0 - z1*z0 + w1*w0), dtype=numpy.float64) - - -def quaternion_conjugate(quaternion): - """Return conjugate of quaternion. - - >>> q0 = random_quaternion() - >>> q1 = quaternion_conjugate(q0) - >>> q1[3] == q0[3] and all(q1[:3] == -q0[:3]) - True - - """ - return numpy.array((-quaternion[0], -quaternion[1], - -quaternion[2], quaternion[3]), dtype=numpy.float64) - - -def quaternion_inverse(quaternion): - """Return inverse of quaternion. - - >>> q0 = random_quaternion() - >>> q1 = quaternion_inverse(q0) - >>> numpy.allclose(quaternion_multiply(q0, q1), [0, 0, 0, 1]) - True - - """ - return quaternion_conjugate(quaternion) / numpy.dot(quaternion, quaternion) - - -def quaternion_slerp(quat0, quat1, fraction, spin=0, shortestpath=True): - """Return spherical linear interpolation between two quaternions. - - >>> q0 = random_quaternion() - >>> q1 = random_quaternion() - >>> q = quaternion_slerp(q0, q1, 0.0) - >>> numpy.allclose(q, q0) - True - >>> q = quaternion_slerp(q0, q1, 1.0, 1) - >>> numpy.allclose(q, q1) - True - >>> q = quaternion_slerp(q0, q1, 0.5) - >>> angle = math.acos(numpy.dot(q0, q)) - >>> numpy.allclose(2.0, math.acos(numpy.dot(q0, q1)) / angle) or \ - numpy.allclose(2.0, math.acos(-numpy.dot(q0, q1)) / angle) - True - - """ - q0 = unit_vector(quat0[:4]) - q1 = unit_vector(quat1[:4]) - if fraction == 0.0: - return q0 - elif fraction == 1.0: - return q1 - d = numpy.dot(q0, q1) - if abs(abs(d) - 1.0) < _EPS: - return q0 - if shortestpath and d < 0.0: - # invert rotation - d = -d - q1 *= -1.0 - angle = math.acos(d) + spin * math.pi - if abs(angle) < _EPS: - return q0 - isin = 1.0 / math.sin(angle) - q0 *= math.sin((1.0 - fraction) * angle) * isin - q1 *= math.sin(fraction * angle) * isin - q0 += q1 - return q0 - - -def random_quaternion(rand=None): - """Return uniform random unit quaternion. - - rand: array like or None - Three independent random variables that are uniformly distributed - between 0 and 1. - - >>> q = random_quaternion() - >>> numpy.allclose(1.0, vector_norm(q)) - True - >>> q = random_quaternion(numpy.random.random(3)) - >>> q.shape - (4,) - - """ - if rand is None: - rand = numpy.random.rand(3) - else: - assert len(rand) == 3 - r1 = numpy.sqrt(1.0 - rand[0]) - r2 = numpy.sqrt(rand[0]) - pi2 = math.pi * 2.0 - t1 = pi2 * rand[1] - t2 = pi2 * rand[2] - return numpy.array((numpy.sin(t1)*r1, - numpy.cos(t1)*r1, - numpy.sin(t2)*r2, - numpy.cos(t2)*r2), dtype=numpy.float64) - - -def random_rotation_matrix(rand=None): - """Return uniform random rotation matrix. - - rnd: array like - Three independent random variables that are uniformly distributed - between 0 and 1 for each returned quaternion. - - >>> R = random_rotation_matrix() - >>> numpy.allclose(numpy.dot(R.T, R), numpy.identity(4)) - True - - """ - return quaternion_matrix(random_quaternion(rand)) - - -class Arcball(object): - """Virtual Trackball Control. 
- - >>> ball = Arcball() - >>> ball = Arcball(initial=numpy.identity(4)) - >>> ball.place([320, 320], 320) - >>> ball.down([500, 250]) - >>> ball.drag([475, 275]) - >>> R = ball.matrix() - >>> numpy.allclose(numpy.sum(R), 3.90583455) - True - >>> ball = Arcball(initial=[0, 0, 0, 1]) - >>> ball.place([320, 320], 320) - >>> ball.setaxes([1,1,0], [-1, 1, 0]) - >>> ball.setconstrain(True) - >>> ball.down([400, 200]) - >>> ball.drag([200, 400]) - >>> R = ball.matrix() - >>> numpy.allclose(numpy.sum(R), 0.2055924) - True - >>> ball.next() - - """ - - def __init__(self, initial=None): - """Initialize virtual trackball control. - - initial : quaternion or rotation matrix - - """ - self._axis = None - self._axes = None - self._radius = 1.0 - self._center = [0.0, 0.0] - self._vdown = numpy.array([0, 0, 1], dtype=numpy.float64) - self._constrain = False - - if initial is None: - self._qdown = numpy.array([0, 0, 0, 1], dtype=numpy.float64) - else: - initial = numpy.array(initial, dtype=numpy.float64) - if initial.shape == (4, 4): - self._qdown = quaternion_from_matrix(initial) - elif initial.shape == (4, ): - initial /= vector_norm(initial) - self._qdown = initial - else: - raise ValueError("initial not a quaternion or matrix.") - - self._qnow = self._qpre = self._qdown - - def place(self, center, radius): - """Place Arcball, e.g. when window size changes. - - center : sequence[2] - Window coordinates of trackball center. - radius : float - Radius of trackball in window coordinates. - - """ - self._radius = float(radius) - self._center[0] = center[0] - self._center[1] = center[1] - - def setaxes(self, *axes): - """Set axes to constrain rotations.""" - if axes is None: - self._axes = None - else: - self._axes = [unit_vector(axis) for axis in axes] - - def setconstrain(self, constrain): - """Set state of constrain to axis mode.""" - self._constrain = constrain == True - - def getconstrain(self): - """Return state of constrain to axis mode.""" - return self._constrain - - def down(self, point): - """Set initial cursor window coordinates and pick constrain-axis.""" - self._vdown = arcball_map_to_sphere(point, self._center, self._radius) - self._qdown = self._qpre = self._qnow - - if self._constrain and self._axes is not None: - self._axis = arcball_nearest_axis(self._vdown, self._axes) - self._vdown = arcball_constrain_to_axis(self._vdown, self._axis) - else: - self._axis = None - - def drag(self, point): - """Update current cursor window coordinates.""" - vnow = arcball_map_to_sphere(point, self._center, self._radius) - - if self._axis is not None: - vnow = arcball_constrain_to_axis(vnow, self._axis) - - self._qpre = self._qnow - - t = numpy.cross(self._vdown, vnow) - if numpy.dot(t, t) < _EPS: - self._qnow = self._qdown - else: - q = [t[0], t[1], t[2], numpy.dot(self._vdown, vnow)] - self._qnow = quaternion_multiply(q, self._qdown) - - def next(self, acceleration=0.0): - """Continue rotation in direction of last drag.""" - q = quaternion_slerp(self._qpre, self._qnow, 2.0+acceleration, False) - self._qpre, self._qnow = self._qnow, q - - def matrix(self): - """Return homogeneous rotation matrix.""" - return quaternion_matrix(self._qnow) - - -def arcball_map_to_sphere(point, center, radius): - """Return unit sphere coordinates from window coordinates.""" - v = numpy.array(((point[0] - center[0]) / radius, - (center[1] - point[1]) / radius, - 0.0), dtype=numpy.float64) - n = v[0]*v[0] + v[1]*v[1] - if n > 1.0: - v /= math.sqrt(n) # position outside of sphere - else: - v[2] = math.sqrt(1.0 - n) - return v - 
-
-def arcball_constrain_to_axis(point, axis):
-    """Return sphere point perpendicular to axis."""
-    v = numpy.array(point, dtype=numpy.float64, copy=True)
-    a = numpy.array(axis, dtype=numpy.float64, copy=True)
-    v -= a * numpy.dot(a, v) # on plane
-    n = vector_norm(v)
-    if n > _EPS:
-        if v[2] < 0.0:
-            v *= -1.0
-        v /= n
-        return v
-    if a[2] == 1.0:
-        return numpy.array([1, 0, 0], dtype=numpy.float64)
-    return unit_vector([-a[1], a[0], 0])
-
-
-def arcball_nearest_axis(point, axes):
-    """Return axis whose arc is nearest to point."""
-    point = numpy.array(point, dtype=numpy.float64, copy=False)
-    nearest = None
-    mx = -1.0
-    for axis in axes:
-        t = numpy.dot(arcball_constrain_to_axis(point, axis), point)
-        if t > mx:
-            nearest = axis
-            mx = t
-    return nearest
-
-
-# epsilon for testing whether a number is close to zero
-_EPS = numpy.finfo(float).eps * 4.0
-
-# axis sequences for Euler angles
-_NEXT_AXIS = [1, 2, 0, 1]
-
-# map axes strings to/from tuples of inner axis, parity, repetition, frame
-_AXES2TUPLE = {
-    'sxyz': (0, 0, 0, 0), 'sxyx': (0, 0, 1, 0), 'sxzy': (0, 1, 0, 0),
-    'sxzx': (0, 1, 1, 0), 'syzx': (1, 0, 0, 0), 'syzy': (1, 0, 1, 0),
-    'syxz': (1, 1, 0, 0), 'syxy': (1, 1, 1, 0), 'szxy': (2, 0, 0, 0),
-    'szxz': (2, 0, 1, 0), 'szyx': (2, 1, 0, 0), 'szyz': (2, 1, 1, 0),
-    'rzyx': (0, 0, 0, 1), 'rxyx': (0, 0, 1, 1), 'ryzx': (0, 1, 0, 1),
-    'rxzx': (0, 1, 1, 1), 'rxzy': (1, 0, 0, 1), 'ryzy': (1, 0, 1, 1),
-    'rzxy': (1, 1, 0, 1), 'ryxy': (1, 1, 1, 1), 'ryxz': (2, 0, 0, 1),
-    'rzxz': (2, 0, 1, 1), 'rxyz': (2, 1, 0, 1), 'rzyz': (2, 1, 1, 1)}
-
-_TUPLE2AXES = dict((v, k) for k, v in _AXES2TUPLE.items())
-
-# helper functions
-
-def vector_norm(data, axis=None, out=None):
-    """Return length, i.e. euclidean norm, of ndarray along axis.
-
-    >>> v = numpy.random.random(3)
-    >>> n = vector_norm(v)
-    >>> numpy.allclose(n, numpy.linalg.norm(v))
-    True
-    >>> v = numpy.random.rand(6, 5, 3)
-    >>> n = vector_norm(v, axis=-1)
-    >>> numpy.allclose(n, numpy.sqrt(numpy.sum(v*v, axis=2)))
-    True
-    >>> n = vector_norm(v, axis=1)
-    >>> numpy.allclose(n, numpy.sqrt(numpy.sum(v*v, axis=1)))
-    True
-    >>> v = numpy.random.rand(5, 4, 3)
-    >>> n = numpy.empty((5, 3), dtype=numpy.float64)
-    >>> vector_norm(v, axis=1, out=n)
-    >>> numpy.allclose(n, numpy.sqrt(numpy.sum(v*v, axis=1)))
-    True
-    >>> vector_norm([])
-    0.0
-    >>> vector_norm([1.0])
-    1.0
-
-    """
-    data = numpy.array(data, dtype=numpy.float64, copy=True)
-    if out is None:
-        if data.ndim == 1:
-            return math.sqrt(numpy.dot(data, data))
-        data *= data
-        out = numpy.atleast_1d(numpy.sum(data, axis=axis))
-        numpy.sqrt(out, out)
-        return out
-    else:
-        data *= data
-        numpy.sum(data, axis=axis, out=out)
-        numpy.sqrt(out, out)
-
-
-def unit_vector(data, axis=None, out=None):
-    """Return ndarray normalized by length, i.e. euclidean norm, along axis.
-
-    >>> v0 = numpy.random.random(3)
-    >>> v1 = unit_vector(v0)
-    >>> numpy.allclose(v1, v0 / numpy.linalg.norm(v0))
-    True
-    >>> v0 = numpy.random.rand(5, 4, 3)
-    >>> v1 = unit_vector(v0, axis=-1)
-    >>> v2 = v0 / numpy.expand_dims(numpy.sqrt(numpy.sum(v0*v0, axis=2)), 2)
-    >>> numpy.allclose(v1, v2)
-    True
-    >>> v1 = unit_vector(v0, axis=1)
-    >>> v2 = v0 / numpy.expand_dims(numpy.sqrt(numpy.sum(v0*v0, axis=1)), 1)
-    >>> numpy.allclose(v1, v2)
-    True
-    >>> v1 = numpy.empty((5, 4, 3), dtype=numpy.float64)
-    >>> unit_vector(v0, axis=1, out=v1)
-    >>> numpy.allclose(v1, v2)
-    True
-    >>> list(unit_vector([]))
-    []
-    >>> list(unit_vector([1.0]))
-    [1.0]
-
-    """
-    if out is None:
-        data = numpy.array(data, dtype=numpy.float64, copy=True)
-        if data.ndim == 1:
-            data /= math.sqrt(numpy.dot(data, data))
-            return data
-    else:
-        if out is not data:
-            out[:] = numpy.array(data, copy=False)
-        data = out
-    length = numpy.atleast_1d(numpy.sum(data*data, axis))
-    numpy.sqrt(length, length)
-    if axis is not None:
-        length = numpy.expand_dims(length, axis)
-    data /= length
-    if out is None:
-        return data
-
-
-def random_vector(size):
-    """Return array of random doubles in the half-open interval [0.0, 1.0).
-
-    >>> v = random_vector(10000)
-    >>> numpy.all(v >= 0.0) and numpy.all(v < 1.0)
-    True
-    >>> v0 = random_vector(10)
-    >>> v1 = random_vector(10)
-    >>> numpy.any(v0 == v1)
-    False
-
-    """
-    return numpy.random.random(size)
-
-
-def inverse_matrix(matrix):
-    """Return inverse of square transformation matrix.
-
-    >>> M0 = random_rotation_matrix()
-    >>> M1 = inverse_matrix(M0.T)
-    >>> numpy.allclose(M1, numpy.linalg.inv(M0.T))
-    True
-    >>> for size in range(1, 7):
-    ...     M0 = numpy.random.rand(size, size)
-    ...     M1 = inverse_matrix(M0)
-    ...     if not numpy.allclose(M1, numpy.linalg.inv(M0)): print(size)
-
-    """
-    return numpy.linalg.inv(matrix)
-
-
-def concatenate_matrices(*matrices):
-    """Return concatenation of series of transformation matrices.
-
-    >>> M = numpy.random.rand(16).reshape((4, 4)) - 0.5
-    >>> numpy.allclose(M, concatenate_matrices(M))
-    True
-    >>> numpy.allclose(numpy.dot(M, M.T), concatenate_matrices(M, M.T))
-    True
-
-    """
-    M = numpy.identity(4)
-    for i in matrices:
-        M = numpy.dot(M, i)
-    return M
-
-
-def is_same_transform(matrix0, matrix1):
-    """Return True if two matrices perform same transformation.
-
-    >>> is_same_transform(numpy.identity(4), numpy.identity(4))
-    True
-    >>> is_same_transform(numpy.identity(4), random_rotation_matrix())
-    False
-
-    """
-    matrix0 = numpy.array(matrix0, dtype=numpy.float64, copy=True)
-    matrix0 /= matrix0[3, 3]
-    matrix1 = numpy.array(matrix1, dtype=numpy.float64, copy=True)
-    matrix1 /= matrix1[3, 3]
-    return numpy.allclose(matrix0, matrix1)
-
-
-def _import_module(module_name, warn=True, prefix='_py_', ignore='_'):
-    """Try to import all public attributes from module into global namespace.
-
-    Existing attributes with name clashes are renamed with prefix.
-    Attributes starting with underscore are ignored by default.
-
-    Return True on successful import.
- - """ - try: - module = __import__(module_name) - except ImportError: - if warn: - warnings.warn("Failed to import module " + module_name) - else: - for attr in dir(module): - if ignore and attr.startswith(ignore): - continue - if prefix: - if attr in globals(): - globals()[prefix + attr] = globals()[attr] - elif warn: - warnings.warn("No Python implementation of " + attr) - globals()[attr] = getattr(module, attr) - return True \ No newline at end of file diff --git a/legacy/common_math/srv/TfTransform.srv b/legacy/common_math/srv/TfTransform.srv deleted file mode 100644 index 9d8833524..000000000 --- a/legacy/common_math/srv/TfTransform.srv +++ /dev/null @@ -1,8 +0,0 @@ -geometry_msgs/PoseArray pose_array -sensor_msgs/PointCloud2 pointcloud -geometry_msgs/PointStamped point -std_msgs/String target_frame ---- -geometry_msgs/PoseArray target_pose_array -sensor_msgs/PointCloud2 target_pointcloud -geometry_msgs/PointStamped target_point \ No newline at end of file diff --git a/legacy/cv_bridge3/CMakeLists.txt b/legacy/cv_bridge3/CMakeLists.txt deleted file mode 100644 index 05f19fb93..000000000 --- a/legacy/cv_bridge3/CMakeLists.txt +++ /dev/null @@ -1,205 +0,0 @@ -cmake_minimum_required(VERSION 3.0.2) -project(cv_bridge3) - -## Compile as C++11, supported in ROS Kinetic and newer -# add_compile_options(-std=c++11) - -## Find catkin macros and libraries -## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz) -## is used, also find other catkin packages -find_package(catkin REQUIRED catkin_virtualenv) - -## System dependencies are found with CMake's conventions -# find_package(Boost REQUIRED COMPONENTS system) - - -## Uncomment this if the package has a setup.py. This macro ensures -## modules and global scripts declared therein get installed -## See http://ros.org/doc/api/catkin/html/user_guide/setup_dot_py.html -catkin_python_setup() -catkin_generate_virtualenv( - INPUT_REQUIREMENTS requirements.in - PYTHON_INTERPRETER python3.10 -) - -################################################ -## Declare ROS messages, services and actions ## -################################################ - -## To declare and build messages, services or actions from within this -## package, follow these steps: -## * Let MSG_DEP_SET be the set of packages whose message types you use in -## your messages/services/actions (e.g. std_msgs, actionlib_msgs, ...). -## * In the file package.xml: -## * add a build_depend tag for "message_generation" -## * add a build_depend and a exec_depend tag for each package in MSG_DEP_SET -## * If MSG_DEP_SET isn't empty the following dependency has been pulled in -## but can be declared for certainty nonetheless: -## * add a exec_depend tag for "message_runtime" -## * In this file (CMakeLists.txt): -## * add "message_generation" and every package in MSG_DEP_SET to -## find_package(catkin REQUIRED COMPONENTS ...) -## * add "message_runtime" and every package in MSG_DEP_SET to -## catkin_package(CATKIN_DEPENDS ...) -## * uncomment the add_*_files sections below as needed -## and list every .msg/.srv/.action file to be processed -## * uncomment the generate_messages entry below -## * add every package in MSG_DEP_SET to generate_messages(DEPENDENCIES ...) 
- -## Generate messages in the 'msg' folder -# add_message_files( -# FILES -# Message1.msg -# Message2.msg -# ) - -## Generate services in the 'srv' folder -# add_service_files( -# FILES -# Service1.srv -# Service2.srv -# ) - -## Generate actions in the 'action' folder -# add_action_files( -# FILES -# Action1.action -# Action2.action -# ) - -## Generate added messages and services with any dependencies listed here -# generate_messages( -# DEPENDENCIES -# std_msgs # Or other packages containing msgs -# ) - -################################################ -## Declare ROS dynamic reconfigure parameters ## -################################################ - -## To declare and build dynamic reconfigure parameters within this -## package, follow these steps: -## * In the file package.xml: -## * add a build_depend and a exec_depend tag for "dynamic_reconfigure" -## * In this file (CMakeLists.txt): -## * add "dynamic_reconfigure" to -## find_package(catkin REQUIRED COMPONENTS ...) -## * uncomment the "generate_dynamic_reconfigure_options" section below -## and list every .cfg file to be processed - -## Generate dynamic reconfigure parameters in the 'cfg' folder -# generate_dynamic_reconfigure_options( -# cfg/DynReconf1.cfg -# cfg/DynReconf2.cfg -# ) - -################################### -## catkin specific configuration ## -################################### -## The catkin_package macro generates cmake config files for your package -## Declare things to be passed to dependent projects -## INCLUDE_DIRS: uncomment this if your package contains header files -## LIBRARIES: libraries you create in this project that dependent projects also need -## CATKIN_DEPENDS: catkin_packages dependent projects also need -## DEPENDS: system dependencies of this project that dependent projects also need -catkin_package( -# INCLUDE_DIRS include -# LIBRARIES cv_bridge3 -# CATKIN_DEPENDS other_catkin_pkg -# DEPENDS system_lib -) - -########### -## Build ## -########### - -## Specify additional locations of header files -## Your package locations should be listed before other locations -include_directories( -# include -# ${catkin_INCLUDE_DIRS} -) - -## Declare a C++ library -# add_library(${PROJECT_NAME} -# src/${PROJECT_NAME}/cv_bridge3.cpp -# ) - -## Add cmake target dependencies of the library -## as an example, code may need to be generated before libraries -## either from message generation or dynamic reconfigure -# add_dependencies(${PROJECT_NAME} ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS}) - -## Declare a C++ executable -## With catkin_make all packages are built within a single CMake context -## The recommended prefix ensures that target names across packages don't collide -# add_executable(${PROJECT_NAME}_node src/cv_bridge3_node.cpp) - -## Rename C++ executable without prefix -## The above recommended prefix causes long target names, the following renames the -## target back to the shorter version for ease of user use -## e.g. 
"rosrun someones_pkg node" instead of "rosrun someones_pkg someones_pkg_node" -# set_target_properties(${PROJECT_NAME}_node PROPERTIES OUTPUT_NAME node PREFIX "") - -## Add cmake target dependencies of the executable -## same as for the library above -# add_dependencies(${PROJECT_NAME}_node ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS}) - -## Specify libraries to link a library or executable target against -# target_link_libraries(${PROJECT_NAME}_node -# ${catkin_LIBRARIES} -# ) - -############# -## Install ## -############# - -# all install targets should use catkin DESTINATION variables -# See http://ros.org/doc/api/catkin/html/adv_user_guide/variables.html - -## Mark executable scripts (Python etc.) for installation -## in contrast to setup.py, you can choose the destination -# catkin_install_python(PROGRAMS -# scripts/my_python_script -# DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} -# ) - -## Mark executables for installation -## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_executables.html -# install(TARGETS ${PROJECT_NAME}_node -# RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} -# ) - -## Mark libraries for installation -## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_libraries.html -# install(TARGETS ${PROJECT_NAME} -# ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} -# LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} -# RUNTIME DESTINATION ${CATKIN_GLOBAL_BIN_DESTINATION} -# ) - -## Mark cpp header files for installation -# install(DIRECTORY include/${PROJECT_NAME}/ -# DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION} -# FILES_MATCHING PATTERN "*.h" -# PATTERN ".svn" EXCLUDE -# ) - -## Mark other files for installation (e.g. launch and bag files, etc.) -install(FILES - requirements.txt - DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION} -) - -############# -## Testing ## -############# - -## Add gtest based cpp test target and link libraries -# catkin_add_gtest(${PROJECT_NAME}-test test/test_cv_bridge3.cpp) -# if(TARGET ${PROJECT_NAME}-test) -# target_link_libraries(${PROJECT_NAME}-test ${PROJECT_NAME}) -# endif() - -## Add folders to be run by python nosetests -# catkin_add_nosetests(test) diff --git a/legacy/cv_bridge3/README.md b/legacy/cv_bridge3/README.md deleted file mode 100644 index f982ff4e2..000000000 --- a/legacy/cv_bridge3/README.md +++ /dev/null @@ -1,50 +0,0 @@ -# cv_bridge3 - -The cv_bridge3 package - -This package is maintained by: -- [jared](mailto:jared@todo.todo) - -## Prerequisites - -This package depends on the following ROS packages: -- catkin (buildtool) -- catkin_virtualenv (build) - -This packages requires Python 3.10 to be present. - -This package has 2 Python dependencies: -- [opencv-python](https://pypi.org/project/opencv-python)==4.8.0.76 -- .. and 1 sub dependencies - -Ask the package maintainer to write or create a blank `doc/PREREQUISITES.md` for their package! - -## Usage - -Ask the package maintainer to write a `doc/USAGE.md` for their package! - -## Example - -Ask the package maintainer to write a `doc/EXAMPLE.md` for their package! - -## Technical Overview - -Ask the package maintainer to write a `doc/TECHNICAL.md` for their package! - -## ROS Definitions - -### Launch Files - -This package has no launch files. - -### Messages - -This package has no messages. - -### Services - -This package has no services. - -### Actions - -This package has no actions. 
diff --git a/legacy/cv_bridge3/package.xml b/legacy/cv_bridge3/package.xml deleted file mode 100644 index 7ae194569..000000000 --- a/legacy/cv_bridge3/package.xml +++ /dev/null @@ -1,60 +0,0 @@ - - - cv_bridge3 - 0.0.0 - The cv_bridge3 package - - - - - jared - - - - - - TODO - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - catkin - catkin_virtualenv - - - - - - requirements.txt - - diff --git a/legacy/cv_bridge3/requirements.in b/legacy/cv_bridge3/requirements.in deleted file mode 100644 index 6c31834e2..000000000 --- a/legacy/cv_bridge3/requirements.in +++ /dev/null @@ -1 +0,0 @@ -opencv-python==4.8.0.76 diff --git a/legacy/cv_bridge3/requirements.txt b/legacy/cv_bridge3/requirements.txt deleted file mode 100644 index cd1e4c13c..000000000 --- a/legacy/cv_bridge3/requirements.txt +++ /dev/null @@ -1,2 +0,0 @@ -numpy==1.25.2 # via opencv-python -opencv-python==4.8.0.76 # via -r requirements.in diff --git a/legacy/cv_bridge3/setup.py b/legacy/cv_bridge3/setup.py deleted file mode 100644 index 3fb001cbc..000000000 --- a/legacy/cv_bridge3/setup.py +++ /dev/null @@ -1,12 +0,0 @@ -#!/usr/bin/env python - -from distutils.core import setup -from catkin_pkg.python_setup import generate_distutils_setup - -# fetch values from package.xml -setup_args = generate_distutils_setup( - packages=['cv_bridge3'], - package_dir={'': 'src'} -) - -setup(**setup_args) \ No newline at end of file diff --git a/legacy/cv_bridge3/src/cv_bridge3/__init__.py b/legacy/cv_bridge3/src/cv_bridge3/__init__.py deleted file mode 100644 index c934c3c44..000000000 --- a/legacy/cv_bridge3/src/cv_bridge3/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -from cv_bridge3.cv_bridge3 import CvBridge -from cv_bridge3.cv_bridge3 import cv2 diff --git a/legacy/cv_bridge3/src/cv_bridge3/cv_bridge3.py b/legacy/cv_bridge3/src/cv_bridge3/cv_bridge3.py deleted file mode 100644 index b8140e71b..000000000 --- a/legacy/cv_bridge3/src/cv_bridge3/cv_bridge3.py +++ /dev/null @@ -1,222 +0,0 @@ -# From https://answers.ros.org/question/350904/cv_bridge-throws-boost-import-error-in-python-3-and-ros-melodic/?answer=374931#post-id-374931 -# Fix for cv_bridge not working in Python 3 without recompilation. - -""" - Provides conversions between OpenCV and ROS image formats in a hard-coded way. - CV_Bridge, the module usually responsible for doing this, is not compatible with Python 3, - - the language this all is written in. So we create this module, and all is... well, all is not well, - - but all works. 
:-/ - Author(s): Gerard Canal -""" -import sys -import numpy as np -import sensor_msgs -import rospy - -# Hack to import cv2 from https://answers.ros.org/question/290660/import-cv2-error-caused-by-ros/?answer=331764#post-id-331764 -import sys -melodic_dist_pkg = '/opt/ros/melodic/lib/python2.7/dist-packages' -is_melodic = melodic_dist_pkg in sys.path -if is_melodic: - sys.path.remove(melodic_dist_pkg) # in order to import cv2 under python3 -import cv2 -if is_melodic: - sys.path.append(melodic_dist_pkg) # append back in order to import rospy - - -class CvBridge: - def __init__(self): - import cv2 - self.cvtype_to_name = {} - self.cvdepth_to_numpy_depth = {cv2.CV_8U: 'uint8', cv2.CV_8S: 'int8', cv2.CV_16U: 'uint16', - cv2.CV_16S: 'int16', cv2.CV_32S:'int32', cv2.CV_32F:'float32', - cv2.CV_64F: 'float64'} - - for t in ["8U", "8S", "16U", "16S", "32S", "32F", "64F"]: - for c in [1, 2, 3, 4]: - nm = "%sC%d" % (t, c) - self.cvtype_to_name[getattr(cv2, "CV_%s" % nm)] = nm - - self.numpy_type_to_cvtype = {'uint8': '8U', 'int8': '8S', 'uint16': '16U', - 'int16': '16S', 'int32': '32S', 'float32': '32F', - 'float64': '64F'} - self.numpy_type_to_cvtype.update(dict((v, k) for (k, v) in self.numpy_type_to_cvtype.items())) - - def dtype_with_channels_to_cvtype2(self, dtype, n_channels): - return '%sC%d' % (self.numpy_type_to_cvtype[dtype.name], n_channels) - - def cvtype2_to_dtype_with_channels(self, cvtype): - from cv_bridge.boost.cv_bridge_boost import CV_MAT_CNWrap, CV_MAT_DEPTHWrap - return self.cvdepth_to_numpy_depth[CV_MAT_DEPTHWrap(cvtype)], CV_MAT_CNWrap(cvtype) - - def encoding_to_cvtype2(self, encoding): - from cv_bridge.boost.cv_bridge_boost import getCvType - - try: - return getCvType(encoding) - except RuntimeError as e: - raise CvBridgeError(e) - - def encoding_to_dtype_with_channels(self, encoding): - return self.cvtype2_to_dtype_with_channels(self.encoding_to_cvtype2(encoding)) - - def compressed_imgmsg_to_cv2(self, cmprs_img_msg, desired_encoding = "passthrough"): - """ - Convert a sensor_msgs::CompressedImage message to an OpenCV :cpp:type:`cv::Mat`. - :param cmprs_img_msg: A :cpp:type:`sensor_msgs::CompressedImage` message - :param desired_encoding: The encoding of the image data, one of the following strings: - * ``"passthrough"`` - * one of the standard strings in sensor_msgs/image_encodings.h - :rtype: :cpp:type:`cv::Mat` - :raises CvBridgeError: when conversion is not possible. - If desired_encoding is ``"passthrough"``, then the returned image has the same format as img_msg. - Otherwise desired_encoding must be one of the standard image encodings - This function returns an OpenCV :cpp:type:`cv::Mat` message on success, or raises :exc:`cv_bridge.CvBridgeError` on failure. 
- If the image only has one channel, the shape has size 2 (width and height) - """ - import cv2 - import numpy as np - - str_msg = cmprs_img_msg.data - buf = np.ndarray(shape=(1, len(str_msg)), - dtype=np.uint8, buffer=cmprs_img_msg.data) - im = cv2.imdecode(buf, cv2.IMREAD_UNCHANGED) - - if desired_encoding == "passthrough": - return im - - from cv_bridge.boost.cv_bridge_boost import cvtColor2 - - try: - res = cvtColor2(im, "bgr8", desired_encoding) - except RuntimeError as e: - raise CvBridgeError(e) - - return res - - def imgmsg_to_cv2_np(self, img_msg): - im = np.frombuffer(img_msg.data, dtype=np.uint8).reshape(img_msg.height, img_msg.width, -1) - print(img_msg.encoding) - if img_msg.encoding == 'rgb8': - return cv2.cvtColor(im, cv2.COLOR_RGB2BGR) - elif img_msg.encoding in ["bgr8", "8UC3", "mono8"]: - return im - - - def imgmsg_to_cv2(self, img_msg, desired_encoding = "passthrough"): - """ - Convert a sensor_msgs::Image message to an OpenCV :cpp:type:`cv::Mat`. - :param img_msg: A :cpp:type:`sensor_msgs::Image` message - :param desired_encoding: The encoding of the image data, one of the following strings: - * ``"passthrough"`` - * one of the standard strings in sensor_msgs/image_encodings.h - :rtype: :cpp:type:`cv::Mat` - :raises CvBridgeError: when conversion is not possible. - If desired_encoding is ``"passthrough"``, then the returned image has the same format as img_msg. - Otherwise desired_encoding must be one of the standard image encodings - This function returns an OpenCV :cpp:type:`cv::Mat` message on success, or raises :exc:`cv_bridge.CvBridgeError` on failure. - If the image only has one channel, the shape has size 2 (width and height) - """ - import cv2 - import numpy as np - dtype, n_channels = self.encoding_to_dtype_with_channels(img_msg.encoding) - dtype = np.dtype(dtype) - dtype = dtype.newbyteorder('>' if img_msg.is_bigendian else '<') - if n_channels == 1: - im = np.ndarray(shape=(img_msg.height, img_msg.width), - dtype=dtype, buffer=img_msg.data) - else: - if(type(img_msg.data) == str): - im = np.ndarray(shape=(img_msg.height, img_msg.width, n_channels), - dtype=dtype, buffer=img_msg.data.encode()) - else: - im = np.ndarray(shape=(img_msg.height, img_msg.width, n_channels), - dtype=dtype, buffer=img_msg.data) - # If the byte order is different between the message and the system. - if img_msg.is_bigendian == (sys.byteorder == 'little'): - im = im.byteswap().newbyteorder() - - if desired_encoding == "passthrough": - return im - - from cv_bridge.boost.cv_bridge_boost import cvtColor2 - - try: - res = cvtColor2(im, img_msg.encoding, desired_encoding) - except RuntimeError as e: - raise CvBridgeError(e) - - return res - - def cv2_to_compressed_imgmsg(self, cvim, dst_format = "jpg"): - """ - Convert an OpenCV :cpp:type:`cv::Mat` type to a ROS sensor_msgs::CompressedImage message. 
- :param cvim: An OpenCV :cpp:type:`cv::Mat` - :param dst_format: The format of the image data, one of the following strings: - * from http://docs.opencv.org/2.4/modules/highgui/doc/reading_and_writing_images_and_video.html - * from http://docs.opencv.org/2.4/modules/highgui/doc/reading_and_writing_images_and_video.html#Mat imread(const string& filename, int flags) - * bmp, dib - * jpeg, jpg, jpe - * jp2 - * png - * pbm, pgm, ppm - * sr, ras - * tiff, tif - :rtype: A sensor_msgs.msg.CompressedImage message - :raises CvBridgeError: when the ``cvim`` has a type that is incompatible with ``format`` - This function returns a sensor_msgs::Image message on success, or raises :exc:`cv_bridge.CvBridgeError` on failure. - """ - import cv2 - import numpy as np - if not isinstance(cvim, (np.ndarray, np.generic)): - raise TypeError('Your input type is not a numpy array') - cmprs_img_msg = sensor_msgs.msg.CompressedImage() - cmprs_img_msg.format = dst_format - ext_format = '.' + dst_format - try: - cmprs_img_msg.data = np.array(cv2.imencode(ext_format, cvim)[1]).tostring() - except RuntimeError as e: - raise CvBridgeError(e) - - return cmprs_img_msg - - def cv2_to_imgmsg(self, cvim, encoding = "passthrough", header = None): - """ - Convert an OpenCV :cpp:type:`cv::Mat` type to a ROS sensor_msgs::Image message. - :param cvim: An OpenCV :cpp:type:`cv::Mat` - :param encoding: The encoding of the image data, one of the following strings: - * ``"passthrough"`` - * one of the standard strings in sensor_msgs/image_encodings.h - :param header: A std_msgs.msg.Header message - :rtype: A sensor_msgs.msg.Image message - :raises CvBridgeError: when the ``cvim`` has a type that is incompatible with ``encoding`` - If encoding is ``"passthrough"``, then the message has the same encoding as the image's OpenCV type. - Otherwise desired_encoding must be one of the standard image encodings - This function returns a sensor_msgs::Image message on success, or raises :exc:`cv_bridge.CvBridgeError` on failure. 
- """ - import cv2 - import numpy as np - if not isinstance(cvim, (np.ndarray, np.generic)): - raise TypeError('Your input type is not a numpy array') - img_msg = sensor_msgs.msg.Image() - img_msg.height = cvim.shape[0] - img_msg.width = cvim.shape[1] - if header is not None: - img_msg.header = header - if len(cvim.shape) < 3: - cv_type = self.dtype_with_channels_to_cvtype2(cvim.dtype, 1) - else: - cv_type = self.dtype_with_channels_to_cvtype2(cvim.dtype, cvim.shape[2]) - if encoding == "passthrough": - img_msg.encoding = cv_type - else: - img_msg.encoding = encoding - # Verify that the supplied encoding is compatible with the type of the OpenCV image - if self.cvtype_to_name[self.encoding_to_cvtype2(encoding)] != cv_type: - raise CvBridgeError("encoding specified as %s, but image has incompatible type %s" % (encoding, cv_type)) - if cvim.dtype.byteorder == '>': - img_msg.is_bigendian = True - img_msg.data = cvim.tostring() - img_msg.step = len(img_msg.data) // img_msg.height - - return img_msg \ No newline at end of file diff --git a/legacy/face_detection/CMakeLists.txt b/legacy/face_detection/CMakeLists.txt deleted file mode 100644 index f54e4bdd9..000000000 --- a/legacy/face_detection/CMakeLists.txt +++ /dev/null @@ -1,208 +0,0 @@ -cmake_minimum_required(VERSION 3.0.2) -project(face_detection) - -## Compile as C++11, supported in ROS Kinetic and newer -# add_compile_options(-std=c++11) - -## Find catkin macros and libraries -## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz) -## is used, also find other catkin packages -find_package(catkin REQUIRED COMPONENTS - rospy - std_msgs - sensor_msgs - message_runtime - message_generation - lasr_vision_msgs -) - -## System dependencies are found with CMake's conventions -# find_package(Boost REQUIRED COMPONENTS system) - -## Uncomment this if the package has a setup.py. This macro ensures -## modules and global scripts declared therein get installed -## See http://ros.org/doc/api/catkin/html/user_guide/setup_dot_py.html -catkin_python_setup() - -################################################ -## Declare ROS messages, services and actions ## -################################################ - -## To declare and build messages, services or actions from within this -## package, follow these steps: -## * Let MSG_DEP_SET be the set of packages whose message types you use in -## your messages/services/actions (e.g. std_msgs, actionlib_msgs, ...). -## * In the file package.xml: -## * add a build_depend tag for "message_generation" -## * add a build_depend and a exec_depend tag for each package in MSG_DEP_SET -## * If MSG_DEP_SET isn't empty the following dependency has been pulled in -## but can be declared for certainty nonetheless: -## * add a exec_depend tag for "message_runtime" -## * In this file (CMakeLists.txt): -## * add "message_generation" and every package in MSG_DEP_SET to -## find_package(catkin REQUIRED COMPONENTS ...) -## * add "message_runtime" and every package in MSG_DEP_SET to -## catkin_package(CATKIN_DEPENDS ...) -## * uncomment the add_*_files sections below as needed -## and list every .msg/.srv/.action file to be processed -## * uncomment the generate_messages entry below -## * add every package in MSG_DEP_SET to generate_messages(DEPENDENCIES ...) 
- -## Generate messages in the 'msg' folder -#add_message_files( -# FILES -# Detection.msg -#) - -## Generate services in the 'srv' folder -add_service_files( - FILES - FaceDetection.srv -) - -## Generate actions in the 'action' folder -# add_action_files( -# FILES -# Action1.action -# Action2.action -# ) - -## Generate added messages and services with any dependencies listed here -generate_messages( - DEPENDENCIES - std_msgs - sensor_msgs - lasr_vision_msgs -) - -################################################ -## Declare ROS dynamic reconfigure parameters ## -################################################ - -## To declare and build dynamic reconfigure parameters within this -## package, follow these steps: -## * In the file package.xml: -## * add a build_depend and a exec_depend tag for "dynamic_reconfigure" -## * In this file (CMakeLists.txt): -## * add "dynamic_reconfigure" to -## find_package(catkin REQUIRED COMPONENTS ...) -## * uncomment the "generate_dynamic_reconfigure_options" section below -## and list every .cfg file to be processed - -## Generate dynamic reconfigure parameters in the 'cfg' folder -# generate_dynamic_reconfigure_options( -# cfg/DynReconf1.cfg -# cfg/DynReconf2.cfg -# ) - -################################### -## catkin specific configuration ## -################################### -## The catkin_package macro generates cmake config files for your package -## Declare things to be passed to dependent projects -## INCLUDE_DIRS: uncomment this if your package contains header files -## LIBRARIES: libraries you create in this project that dependent projects also need -## CATKIN_DEPENDS: catkin_packages dependent projects also need -## DEPENDS: system dependencies of this project that dependent projects also need -catkin_package( -# INCLUDE_DIRS include -# LIBRARIES lasr_object_detection_yolo -# CATKIN_DEPENDS rospy std_msgs -# DEPENDS system_lib -) - -########### -## Build ## -########### - -## Specify additional locations of header files -## Your package locations should be listed before other locations -include_directories( -# include - ${catkin_INCLUDE_DIRS} -) - -## Declare a C++ library -# add_library(${PROJECT_NAME} -# src/${PROJECT_NAME}/lasr_object_detection_yolo.cpp -# ) - -## Add cmake target dependencies of the library -## as an example, code may need to be generated before libraries -## either from message generation or dynamic reconfigure -# add_dependencies(${PROJECT_NAME} ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS}) - -## Declare a C++ executable -## With catkin_make all packages are built within a single CMake context -## The recommended prefix ensures that target names across packages don't collide -# add_executable(${PROJECT_NAME}_node src/lasr_object_detection_yolo_node.cpp) - -## Rename C++ executable without prefix -## The above recommended prefix causes long target names, the following renames the -## target back to the shorter version for ease of user use -## e.g. 
"rosrun someones_pkg node" instead of "rosrun someones_pkg someones_pkg_node" -# set_target_properties(${PROJECT_NAME}_node PROPERTIES OUTPUT_NAME node PREFIX "") - -## Add cmake target dependencies of the executable -## same as for the library above -# add_dependencies(${PROJECT_NAME}_node ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS}) - -## Specify libraries to link a library or executable target against -# target_link_libraries(${PROJECT_NAME}_node -# ${catkin_LIBRARIES} -# ) - -############# -## Install ## -############# - -# all install targets should use catkin DESTINATION variables -# See http://ros.org/doc/api/catkin/html/adv_user_guide/variables.html - -## Mark executable scripts (Python etc.) for installation -## in contrast to setup.py, you can choose the destination -# catkin_install_python(PROGRAMS -# scripts/my_python_script -# DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} -# ) - -## Mark executables for installation -## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_executables.html -# install(TARGETS ${PROJECT_NAME}_node -# RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} -# ) - -## Mark libraries for installation -## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_libraries.html -# install(TARGETS ${PROJECT_NAME} -# ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} -# LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} -# RUNTIME DESTINATION ${CATKIN_GLOBAL_BIN_DESTINATION} -# ) - -## Mark cpp header files for installation -# install(DIRECTORY include/${PROJECT_NAME}/ -# DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION} -# FILES_MATCHING PATTERN "*.h" -# PATTERN ".svn" EXCLUDE -# ) - -## Mark other files for installation (e.g. launch and bag files, etc.) -# install(FILES -# # myfile1 -# # myfile2 -# DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION} -# ) - -############# -## Testing ## -############# - -## Add gtest based cpp test target and link libraries -# catkin_add_gtest(${PROJECT_NAME}-test test/test_lasr_object_detection_yolo.cpp) -# if(TARGET ${PROJECT_NAME}-test) -# target_link_libraries(${PROJECT_NAME}-test ${PROJECT_NAME}) -# endif() - -## Add folders to be run by python nosetests -# catkin_add_nosetests(test) diff --git a/legacy/face_detection/README.md b/legacy/face_detection/README.md deleted file mode 100644 index ea1f83c96..000000000 --- a/legacy/face_detection/README.md +++ /dev/null @@ -1,75 +0,0 @@ -# face_detection - -The face_detection package - -This package is maintained by: -- [nicole](mailto:nicole@todo.todo) - -## Prerequisites - -This package depends on the following ROS packages: -- catkin (buildtool) -- rospy (build) -- std_msgs (build) -- rospy (exec) -- std_msgs (exec) -- lasr_vision_msgs - -Ask the package maintainer to write or create a blank `doc/PREREQUISITES.md` for their package! - -## Usage - -Ask the package maintainer to write a `doc/USAGE.md` for their package! - -## Example - -Ask the package maintainer to write a `doc/EXAMPLE.md` for their package! - -## Technical Overview - -Ask the package maintainer to write a `doc/TECHNICAL.md` for their package! - -## ROS Definitions - -### Launch Files - -This package has no launch files. - -### Messages - -This package has no messages. 
- -### Services - -#### `FaceDetection` - -Request - -| Field | Type | Description | -|:-:|:-:|---| -| image_raw | sensor_msgs/Image | | - -Response - -| Field | Type | Description | -|:-:|:-:|---| -| detected_objects | lasr_vision_msgs/Detection[] | | - -#### `FaceDetectionPCL` - -Request - -| Field | Type | Description | -|:-:|:-:|---| -| cloud | sensor_msgs/PointCloud2 | | - -Response - -| Field | Type | Description | -|:-:|:-:|---| -| detections | face_detection/DetectionPCL[] | | - - -### Actions - -This package has no actions. diff --git a/legacy/face_detection/caffe_model/deploy.prototxt b/legacy/face_detection/caffe_model/deploy.prototxt deleted file mode 100644 index 77a4bdb1f..000000000 --- a/legacy/face_detection/caffe_model/deploy.prototxt +++ /dev/null @@ -1,1789 +0,0 @@ -input: "data" -input_shape { - dim: 1 - dim: 3 - dim: 300 - dim: 300 -} - -layer { - name: "data_bn" - type: "BatchNorm" - bottom: "data" - top: "data_bn" - param { - lr_mult: 0.0 - } - param { - lr_mult: 0.0 - } - param { - lr_mult: 0.0 - } -} -layer { - name: "data_scale" - type: "Scale" - bottom: "data_bn" - top: "data_bn" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - param { - lr_mult: 2.0 - decay_mult: 1.0 - } - scale_param { - bias_term: true - } -} -layer { - name: "conv1_h" - type: "Convolution" - bottom: "data_bn" - top: "conv1_h" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - param { - lr_mult: 2.0 - decay_mult: 1.0 - } - convolution_param { - num_output: 32 - pad: 3 - kernel_size: 7 - stride: 2 - weight_filler { - type: "msra" - variance_norm: FAN_OUT - } - bias_filler { - type: "constant" - value: 0.0 - } - } -} -layer { - name: "conv1_bn_h" - type: "BatchNorm" - bottom: "conv1_h" - top: "conv1_h" - param { - lr_mult: 0.0 - } - param { - lr_mult: 0.0 - } - param { - lr_mult: 0.0 - } -} -layer { - name: "conv1_scale_h" - type: "Scale" - bottom: "conv1_h" - top: "conv1_h" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - param { - lr_mult: 2.0 - decay_mult: 1.0 - } - scale_param { - bias_term: true - } -} -layer { - name: "conv1_relu" - type: "ReLU" - bottom: "conv1_h" - top: "conv1_h" -} -layer { - name: "conv1_pool" - type: "Pooling" - bottom: "conv1_h" - top: "conv1_pool" - pooling_param { - kernel_size: 3 - stride: 2 - } -} -layer { - name: "layer_64_1_conv1_h" - type: "Convolution" - bottom: "conv1_pool" - top: "layer_64_1_conv1_h" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - convolution_param { - num_output: 32 - bias_term: false - pad: 1 - kernel_size: 3 - stride: 1 - weight_filler { - type: "msra" - } - bias_filler { - type: "constant" - value: 0.0 - } - } -} -layer { - name: "layer_64_1_bn2_h" - type: "BatchNorm" - bottom: "layer_64_1_conv1_h" - top: "layer_64_1_conv1_h" - param { - lr_mult: 0.0 - } - param { - lr_mult: 0.0 - } - param { - lr_mult: 0.0 - } -} -layer { - name: "layer_64_1_scale2_h" - type: "Scale" - bottom: "layer_64_1_conv1_h" - top: "layer_64_1_conv1_h" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - param { - lr_mult: 2.0 - decay_mult: 1.0 - } - scale_param { - bias_term: true - } -} -layer { - name: "layer_64_1_relu2" - type: "ReLU" - bottom: "layer_64_1_conv1_h" - top: "layer_64_1_conv1_h" -} -layer { - name: "layer_64_1_conv2_h" - type: "Convolution" - bottom: "layer_64_1_conv1_h" - top: "layer_64_1_conv2_h" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - convolution_param { - num_output: 32 - bias_term: false - pad: 1 - kernel_size: 3 - stride: 1 - weight_filler { - type: "msra" - } - bias_filler { - type: "constant" - value: 0.0 - } - } -} -layer { - name: 
"layer_64_1_sum" - type: "Eltwise" - bottom: "layer_64_1_conv2_h" - bottom: "conv1_pool" - top: "layer_64_1_sum" -} -layer { - name: "layer_128_1_bn1_h" - type: "BatchNorm" - bottom: "layer_64_1_sum" - top: "layer_128_1_bn1_h" - param { - lr_mult: 0.0 - } - param { - lr_mult: 0.0 - } - param { - lr_mult: 0.0 - } -} -layer { - name: "layer_128_1_scale1_h" - type: "Scale" - bottom: "layer_128_1_bn1_h" - top: "layer_128_1_bn1_h" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - param { - lr_mult: 2.0 - decay_mult: 1.0 - } - scale_param { - bias_term: true - } -} -layer { - name: "layer_128_1_relu1" - type: "ReLU" - bottom: "layer_128_1_bn1_h" - top: "layer_128_1_bn1_h" -} -layer { - name: "layer_128_1_conv1_h" - type: "Convolution" - bottom: "layer_128_1_bn1_h" - top: "layer_128_1_conv1_h" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - convolution_param { - num_output: 128 - bias_term: false - pad: 1 - kernel_size: 3 - stride: 2 - weight_filler { - type: "msra" - } - bias_filler { - type: "constant" - value: 0.0 - } - } -} -layer { - name: "layer_128_1_bn2" - type: "BatchNorm" - bottom: "layer_128_1_conv1_h" - top: "layer_128_1_conv1_h" - param { - lr_mult: 0.0 - } - param { - lr_mult: 0.0 - } - param { - lr_mult: 0.0 - } -} -layer { - name: "layer_128_1_scale2" - type: "Scale" - bottom: "layer_128_1_conv1_h" - top: "layer_128_1_conv1_h" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - param { - lr_mult: 2.0 - decay_mult: 1.0 - } - scale_param { - bias_term: true - } -} -layer { - name: "layer_128_1_relu2" - type: "ReLU" - bottom: "layer_128_1_conv1_h" - top: "layer_128_1_conv1_h" -} -layer { - name: "layer_128_1_conv2" - type: "Convolution" - bottom: "layer_128_1_conv1_h" - top: "layer_128_1_conv2" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - convolution_param { - num_output: 128 - bias_term: false - pad: 1 - kernel_size: 3 - stride: 1 - weight_filler { - type: "msra" - } - bias_filler { - type: "constant" - value: 0.0 - } - } -} -layer { - name: "layer_128_1_conv_expand_h" - type: "Convolution" - bottom: "layer_128_1_bn1_h" - top: "layer_128_1_conv_expand_h" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - convolution_param { - num_output: 128 - bias_term: false - pad: 0 - kernel_size: 1 - stride: 2 - weight_filler { - type: "msra" - } - bias_filler { - type: "constant" - value: 0.0 - } - } -} -layer { - name: "layer_128_1_sum" - type: "Eltwise" - bottom: "layer_128_1_conv2" - bottom: "layer_128_1_conv_expand_h" - top: "layer_128_1_sum" -} -layer { - name: "layer_256_1_bn1" - type: "BatchNorm" - bottom: "layer_128_1_sum" - top: "layer_256_1_bn1" - param { - lr_mult: 0.0 - } - param { - lr_mult: 0.0 - } - param { - lr_mult: 0.0 - } -} -layer { - name: "layer_256_1_scale1" - type: "Scale" - bottom: "layer_256_1_bn1" - top: "layer_256_1_bn1" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - param { - lr_mult: 2.0 - decay_mult: 1.0 - } - scale_param { - bias_term: true - } -} -layer { - name: "layer_256_1_relu1" - type: "ReLU" - bottom: "layer_256_1_bn1" - top: "layer_256_1_bn1" -} -layer { - name: "layer_256_1_conv1" - type: "Convolution" - bottom: "layer_256_1_bn1" - top: "layer_256_1_conv1" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - convolution_param { - num_output: 256 - bias_term: false - pad: 1 - kernel_size: 3 - stride: 2 - weight_filler { - type: "msra" - } - bias_filler { - type: "constant" - value: 0.0 - } - } -} -layer { - name: "layer_256_1_bn2" - type: "BatchNorm" - bottom: "layer_256_1_conv1" - top: "layer_256_1_conv1" - param { - lr_mult: 0.0 - } - param { - lr_mult: 
0.0 - } - param { - lr_mult: 0.0 - } -} -layer { - name: "layer_256_1_scale2" - type: "Scale" - bottom: "layer_256_1_conv1" - top: "layer_256_1_conv1" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - param { - lr_mult: 2.0 - decay_mult: 1.0 - } - scale_param { - bias_term: true - } -} -layer { - name: "layer_256_1_relu2" - type: "ReLU" - bottom: "layer_256_1_conv1" - top: "layer_256_1_conv1" -} -layer { - name: "layer_256_1_conv2" - type: "Convolution" - bottom: "layer_256_1_conv1" - top: "layer_256_1_conv2" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - convolution_param { - num_output: 256 - bias_term: false - pad: 1 - kernel_size: 3 - stride: 1 - weight_filler { - type: "msra" - } - bias_filler { - type: "constant" - value: 0.0 - } - } -} -layer { - name: "layer_256_1_conv_expand" - type: "Convolution" - bottom: "layer_256_1_bn1" - top: "layer_256_1_conv_expand" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - convolution_param { - num_output: 256 - bias_term: false - pad: 0 - kernel_size: 1 - stride: 2 - weight_filler { - type: "msra" - } - bias_filler { - type: "constant" - value: 0.0 - } - } -} -layer { - name: "layer_256_1_sum" - type: "Eltwise" - bottom: "layer_256_1_conv2" - bottom: "layer_256_1_conv_expand" - top: "layer_256_1_sum" -} -layer { - name: "layer_512_1_bn1" - type: "BatchNorm" - bottom: "layer_256_1_sum" - top: "layer_512_1_bn1" - param { - lr_mult: 0.0 - } - param { - lr_mult: 0.0 - } - param { - lr_mult: 0.0 - } -} -layer { - name: "layer_512_1_scale1" - type: "Scale" - bottom: "layer_512_1_bn1" - top: "layer_512_1_bn1" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - param { - lr_mult: 2.0 - decay_mult: 1.0 - } - scale_param { - bias_term: true - } -} -layer { - name: "layer_512_1_relu1" - type: "ReLU" - bottom: "layer_512_1_bn1" - top: "layer_512_1_bn1" -} -layer { - name: "layer_512_1_conv1_h" - type: "Convolution" - bottom: "layer_512_1_bn1" - top: "layer_512_1_conv1_h" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - convolution_param { - num_output: 128 - bias_term: false - pad: 1 - kernel_size: 3 - stride: 1 # 2 - weight_filler { - type: "msra" - } - bias_filler { - type: "constant" - value: 0.0 - } - } -} -layer { - name: "layer_512_1_bn2_h" - type: "BatchNorm" - bottom: "layer_512_1_conv1_h" - top: "layer_512_1_conv1_h" - param { - lr_mult: 0.0 - } - param { - lr_mult: 0.0 - } - param { - lr_mult: 0.0 - } -} -layer { - name: "layer_512_1_scale2_h" - type: "Scale" - bottom: "layer_512_1_conv1_h" - top: "layer_512_1_conv1_h" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - param { - lr_mult: 2.0 - decay_mult: 1.0 - } - scale_param { - bias_term: true - } -} -layer { - name: "layer_512_1_relu2" - type: "ReLU" - bottom: "layer_512_1_conv1_h" - top: "layer_512_1_conv1_h" -} -layer { - name: "layer_512_1_conv2_h" - type: "Convolution" - bottom: "layer_512_1_conv1_h" - top: "layer_512_1_conv2_h" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - convolution_param { - num_output: 256 - bias_term: false - pad: 2 # 1 - kernel_size: 3 - stride: 1 - dilation: 2 - weight_filler { - type: "msra" - } - bias_filler { - type: "constant" - value: 0.0 - } - } -} -layer { - name: "layer_512_1_conv_expand_h" - type: "Convolution" - bottom: "layer_512_1_bn1" - top: "layer_512_1_conv_expand_h" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - convolution_param { - num_output: 256 - bias_term: false - pad: 0 - kernel_size: 1 - stride: 1 # 2 - weight_filler { - type: "msra" - } - bias_filler { - type: "constant" - value: 0.0 - } - } -} -layer { - name: "layer_512_1_sum" - type: 
"Eltwise" - bottom: "layer_512_1_conv2_h" - bottom: "layer_512_1_conv_expand_h" - top: "layer_512_1_sum" -} -layer { - name: "last_bn_h" - type: "BatchNorm" - bottom: "layer_512_1_sum" - top: "layer_512_1_sum" - param { - lr_mult: 0.0 - } - param { - lr_mult: 0.0 - } - param { - lr_mult: 0.0 - } -} -layer { - name: "last_scale_h" - type: "Scale" - bottom: "layer_512_1_sum" - top: "layer_512_1_sum" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - param { - lr_mult: 2.0 - decay_mult: 1.0 - } - scale_param { - bias_term: true - } -} -layer { - name: "last_relu" - type: "ReLU" - bottom: "layer_512_1_sum" - top: "fc7" -} - -layer { - name: "conv6_1_h" - type: "Convolution" - bottom: "fc7" - top: "conv6_1_h" - param { - lr_mult: 1 - decay_mult: 1 - } - param { - lr_mult: 2 - decay_mult: 0 - } - convolution_param { - num_output: 128 - pad: 0 - kernel_size: 1 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - value: 0 - } - } -} -layer { - name: "conv6_1_relu" - type: "ReLU" - bottom: "conv6_1_h" - top: "conv6_1_h" -} -layer { - name: "conv6_2_h" - type: "Convolution" - bottom: "conv6_1_h" - top: "conv6_2_h" - param { - lr_mult: 1 - decay_mult: 1 - } - param { - lr_mult: 2 - decay_mult: 0 - } - convolution_param { - num_output: 256 - pad: 1 - kernel_size: 3 - stride: 2 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - value: 0 - } - } -} -layer { - name: "conv6_2_relu" - type: "ReLU" - bottom: "conv6_2_h" - top: "conv6_2_h" -} -layer { - name: "conv7_1_h" - type: "Convolution" - bottom: "conv6_2_h" - top: "conv7_1_h" - param { - lr_mult: 1 - decay_mult: 1 - } - param { - lr_mult: 2 - decay_mult: 0 - } - convolution_param { - num_output: 64 - pad: 0 - kernel_size: 1 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - value: 0 - } - } -} -layer { - name: "conv7_1_relu" - type: "ReLU" - bottom: "conv7_1_h" - top: "conv7_1_h" -} -layer { - name: "conv7_2_h" - type: "Convolution" - bottom: "conv7_1_h" - top: "conv7_2_h" - param { - lr_mult: 1 - decay_mult: 1 - } - param { - lr_mult: 2 - decay_mult: 0 - } - convolution_param { - num_output: 128 - pad: 1 - kernel_size: 3 - stride: 2 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - value: 0 - } - } -} -layer { - name: "conv7_2_relu" - type: "ReLU" - bottom: "conv7_2_h" - top: "conv7_2_h" -} -layer { - name: "conv8_1_h" - type: "Convolution" - bottom: "conv7_2_h" - top: "conv8_1_h" - param { - lr_mult: 1 - decay_mult: 1 - } - param { - lr_mult: 2 - decay_mult: 0 - } - convolution_param { - num_output: 64 - pad: 0 - kernel_size: 1 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - value: 0 - } - } -} -layer { - name: "conv8_1_relu" - type: "ReLU" - bottom: "conv8_1_h" - top: "conv8_1_h" -} -layer { - name: "conv8_2_h" - type: "Convolution" - bottom: "conv8_1_h" - top: "conv8_2_h" - param { - lr_mult: 1 - decay_mult: 1 - } - param { - lr_mult: 2 - decay_mult: 0 - } - convolution_param { - num_output: 128 - pad: 1 - kernel_size: 3 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - value: 0 - } - } -} -layer { - name: "conv8_2_relu" - type: "ReLU" - bottom: "conv8_2_h" - top: "conv8_2_h" -} -layer { - name: "conv9_1_h" - type: "Convolution" - bottom: "conv8_2_h" - top: "conv9_1_h" - param { - lr_mult: 1 - decay_mult: 1 - } - param { - lr_mult: 2 - decay_mult: 0 - } - convolution_param { - num_output: 64 - pad: 0 - kernel_size: 1 - stride: 1 - weight_filler { - 
type: "xavier" - } - bias_filler { - type: "constant" - value: 0 - } - } -} -layer { - name: "conv9_1_relu" - type: "ReLU" - bottom: "conv9_1_h" - top: "conv9_1_h" -} -layer { - name: "conv9_2_h" - type: "Convolution" - bottom: "conv9_1_h" - top: "conv9_2_h" - param { - lr_mult: 1 - decay_mult: 1 - } - param { - lr_mult: 2 - decay_mult: 0 - } - convolution_param { - num_output: 128 - pad: 1 - kernel_size: 3 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - value: 0 - } - } -} -layer { - name: "conv9_2_relu" - type: "ReLU" - bottom: "conv9_2_h" - top: "conv9_2_h" -} -layer { - name: "conv4_3_norm" - type: "Normalize" - bottom: "layer_256_1_bn1" - top: "conv4_3_norm" - norm_param { - across_spatial: false - scale_filler { - type: "constant" - value: 20 - } - channel_shared: false - } -} -layer { - name: "conv4_3_norm_mbox_loc" - type: "Convolution" - bottom: "conv4_3_norm" - top: "conv4_3_norm_mbox_loc" - param { - lr_mult: 1 - decay_mult: 1 - } - param { - lr_mult: 2 - decay_mult: 0 - } - convolution_param { - num_output: 16 - pad: 1 - kernel_size: 3 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - value: 0 - } - } -} -layer { - name: "conv4_3_norm_mbox_loc_perm" - type: "Permute" - bottom: "conv4_3_norm_mbox_loc" - top: "conv4_3_norm_mbox_loc_perm" - permute_param { - order: 0 - order: 2 - order: 3 - order: 1 - } -} -layer { - name: "conv4_3_norm_mbox_loc_flat" - type: "Flatten" - bottom: "conv4_3_norm_mbox_loc_perm" - top: "conv4_3_norm_mbox_loc_flat" - flatten_param { - axis: 1 - } -} -layer { - name: "conv4_3_norm_mbox_conf" - type: "Convolution" - bottom: "conv4_3_norm" - top: "conv4_3_norm_mbox_conf" - param { - lr_mult: 1 - decay_mult: 1 - } - param { - lr_mult: 2 - decay_mult: 0 - } - convolution_param { - num_output: 8 # 84 - pad: 1 - kernel_size: 3 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - value: 0 - } - } -} -layer { - name: "conv4_3_norm_mbox_conf_perm" - type: "Permute" - bottom: "conv4_3_norm_mbox_conf" - top: "conv4_3_norm_mbox_conf_perm" - permute_param { - order: 0 - order: 2 - order: 3 - order: 1 - } -} -layer { - name: "conv4_3_norm_mbox_conf_flat" - type: "Flatten" - bottom: "conv4_3_norm_mbox_conf_perm" - top: "conv4_3_norm_mbox_conf_flat" - flatten_param { - axis: 1 - } -} -layer { - name: "conv4_3_norm_mbox_priorbox" - type: "PriorBox" - bottom: "conv4_3_norm" - bottom: "data" - top: "conv4_3_norm_mbox_priorbox" - prior_box_param { - min_size: 30.0 - max_size: 60.0 - aspect_ratio: 2 - flip: true - clip: false - variance: 0.1 - variance: 0.1 - variance: 0.2 - variance: 0.2 - step: 8 - offset: 0.5 - } -} -layer { - name: "fc7_mbox_loc" - type: "Convolution" - bottom: "fc7" - top: "fc7_mbox_loc" - param { - lr_mult: 1 - decay_mult: 1 - } - param { - lr_mult: 2 - decay_mult: 0 - } - convolution_param { - num_output: 24 - pad: 1 - kernel_size: 3 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - value: 0 - } - } -} -layer { - name: "fc7_mbox_loc_perm" - type: "Permute" - bottom: "fc7_mbox_loc" - top: "fc7_mbox_loc_perm" - permute_param { - order: 0 - order: 2 - order: 3 - order: 1 - } -} -layer { - name: "fc7_mbox_loc_flat" - type: "Flatten" - bottom: "fc7_mbox_loc_perm" - top: "fc7_mbox_loc_flat" - flatten_param { - axis: 1 - } -} -layer { - name: "fc7_mbox_conf" - type: "Convolution" - bottom: "fc7" - top: "fc7_mbox_conf" - param { - lr_mult: 1 - decay_mult: 1 - } - param { - lr_mult: 2 - decay_mult: 0 - } - 
convolution_param { - num_output: 12 # 126 - pad: 1 - kernel_size: 3 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - value: 0 - } - } -} -layer { - name: "fc7_mbox_conf_perm" - type: "Permute" - bottom: "fc7_mbox_conf" - top: "fc7_mbox_conf_perm" - permute_param { - order: 0 - order: 2 - order: 3 - order: 1 - } -} -layer { - name: "fc7_mbox_conf_flat" - type: "Flatten" - bottom: "fc7_mbox_conf_perm" - top: "fc7_mbox_conf_flat" - flatten_param { - axis: 1 - } -} -layer { - name: "fc7_mbox_priorbox" - type: "PriorBox" - bottom: "fc7" - bottom: "data" - top: "fc7_mbox_priorbox" - prior_box_param { - min_size: 60.0 - max_size: 111.0 - aspect_ratio: 2 - aspect_ratio: 3 - flip: true - clip: false - variance: 0.1 - variance: 0.1 - variance: 0.2 - variance: 0.2 - step: 16 - offset: 0.5 - } -} -layer { - name: "conv6_2_mbox_loc" - type: "Convolution" - bottom: "conv6_2_h" - top: "conv6_2_mbox_loc" - param { - lr_mult: 1 - decay_mult: 1 - } - param { - lr_mult: 2 - decay_mult: 0 - } - convolution_param { - num_output: 24 - pad: 1 - kernel_size: 3 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - value: 0 - } - } -} -layer { - name: "conv6_2_mbox_loc_perm" - type: "Permute" - bottom: "conv6_2_mbox_loc" - top: "conv6_2_mbox_loc_perm" - permute_param { - order: 0 - order: 2 - order: 3 - order: 1 - } -} -layer { - name: "conv6_2_mbox_loc_flat" - type: "Flatten" - bottom: "conv6_2_mbox_loc_perm" - top: "conv6_2_mbox_loc_flat" - flatten_param { - axis: 1 - } -} -layer { - name: "conv6_2_mbox_conf" - type: "Convolution" - bottom: "conv6_2_h" - top: "conv6_2_mbox_conf" - param { - lr_mult: 1 - decay_mult: 1 - } - param { - lr_mult: 2 - decay_mult: 0 - } - convolution_param { - num_output: 12 # 126 - pad: 1 - kernel_size: 3 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - value: 0 - } - } -} -layer { - name: "conv6_2_mbox_conf_perm" - type: "Permute" - bottom: "conv6_2_mbox_conf" - top: "conv6_2_mbox_conf_perm" - permute_param { - order: 0 - order: 2 - order: 3 - order: 1 - } -} -layer { - name: "conv6_2_mbox_conf_flat" - type: "Flatten" - bottom: "conv6_2_mbox_conf_perm" - top: "conv6_2_mbox_conf_flat" - flatten_param { - axis: 1 - } -} -layer { - name: "conv6_2_mbox_priorbox" - type: "PriorBox" - bottom: "conv6_2_h" - bottom: "data" - top: "conv6_2_mbox_priorbox" - prior_box_param { - min_size: 111.0 - max_size: 162.0 - aspect_ratio: 2 - aspect_ratio: 3 - flip: true - clip: false - variance: 0.1 - variance: 0.1 - variance: 0.2 - variance: 0.2 - step: 32 - offset: 0.5 - } -} -layer { - name: "conv7_2_mbox_loc" - type: "Convolution" - bottom: "conv7_2_h" - top: "conv7_2_mbox_loc" - param { - lr_mult: 1 - decay_mult: 1 - } - param { - lr_mult: 2 - decay_mult: 0 - } - convolution_param { - num_output: 24 - pad: 1 - kernel_size: 3 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - value: 0 - } - } -} -layer { - name: "conv7_2_mbox_loc_perm" - type: "Permute" - bottom: "conv7_2_mbox_loc" - top: "conv7_2_mbox_loc_perm" - permute_param { - order: 0 - order: 2 - order: 3 - order: 1 - } -} -layer { - name: "conv7_2_mbox_loc_flat" - type: "Flatten" - bottom: "conv7_2_mbox_loc_perm" - top: "conv7_2_mbox_loc_flat" - flatten_param { - axis: 1 - } -} -layer { - name: "conv7_2_mbox_conf" - type: "Convolution" - bottom: "conv7_2_h" - top: "conv7_2_mbox_conf" - param { - lr_mult: 1 - decay_mult: 1 - } - param { - lr_mult: 2 - decay_mult: 0 - } - convolution_param { - 
num_output: 12 # 126 - pad: 1 - kernel_size: 3 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - value: 0 - } - } -} -layer { - name: "conv7_2_mbox_conf_perm" - type: "Permute" - bottom: "conv7_2_mbox_conf" - top: "conv7_2_mbox_conf_perm" - permute_param { - order: 0 - order: 2 - order: 3 - order: 1 - } -} -layer { - name: "conv7_2_mbox_conf_flat" - type: "Flatten" - bottom: "conv7_2_mbox_conf_perm" - top: "conv7_2_mbox_conf_flat" - flatten_param { - axis: 1 - } -} -layer { - name: "conv7_2_mbox_priorbox" - type: "PriorBox" - bottom: "conv7_2_h" - bottom: "data" - top: "conv7_2_mbox_priorbox" - prior_box_param { - min_size: 162.0 - max_size: 213.0 - aspect_ratio: 2 - aspect_ratio: 3 - flip: true - clip: false - variance: 0.1 - variance: 0.1 - variance: 0.2 - variance: 0.2 - step: 64 - offset: 0.5 - } -} -layer { - name: "conv8_2_mbox_loc" - type: "Convolution" - bottom: "conv8_2_h" - top: "conv8_2_mbox_loc" - param { - lr_mult: 1 - decay_mult: 1 - } - param { - lr_mult: 2 - decay_mult: 0 - } - convolution_param { - num_output: 16 - pad: 1 - kernel_size: 3 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - value: 0 - } - } -} -layer { - name: "conv8_2_mbox_loc_perm" - type: "Permute" - bottom: "conv8_2_mbox_loc" - top: "conv8_2_mbox_loc_perm" - permute_param { - order: 0 - order: 2 - order: 3 - order: 1 - } -} -layer { - name: "conv8_2_mbox_loc_flat" - type: "Flatten" - bottom: "conv8_2_mbox_loc_perm" - top: "conv8_2_mbox_loc_flat" - flatten_param { - axis: 1 - } -} -layer { - name: "conv8_2_mbox_conf" - type: "Convolution" - bottom: "conv8_2_h" - top: "conv8_2_mbox_conf" - param { - lr_mult: 1 - decay_mult: 1 - } - param { - lr_mult: 2 - decay_mult: 0 - } - convolution_param { - num_output: 8 # 84 - pad: 1 - kernel_size: 3 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - value: 0 - } - } -} -layer { - name: "conv8_2_mbox_conf_perm" - type: "Permute" - bottom: "conv8_2_mbox_conf" - top: "conv8_2_mbox_conf_perm" - permute_param { - order: 0 - order: 2 - order: 3 - order: 1 - } -} -layer { - name: "conv8_2_mbox_conf_flat" - type: "Flatten" - bottom: "conv8_2_mbox_conf_perm" - top: "conv8_2_mbox_conf_flat" - flatten_param { - axis: 1 - } -} -layer { - name: "conv8_2_mbox_priorbox" - type: "PriorBox" - bottom: "conv8_2_h" - bottom: "data" - top: "conv8_2_mbox_priorbox" - prior_box_param { - min_size: 213.0 - max_size: 264.0 - aspect_ratio: 2 - flip: true - clip: false - variance: 0.1 - variance: 0.1 - variance: 0.2 - variance: 0.2 - step: 100 - offset: 0.5 - } -} -layer { - name: "conv9_2_mbox_loc" - type: "Convolution" - bottom: "conv9_2_h" - top: "conv9_2_mbox_loc" - param { - lr_mult: 1 - decay_mult: 1 - } - param { - lr_mult: 2 - decay_mult: 0 - } - convolution_param { - num_output: 16 - pad: 1 - kernel_size: 3 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - value: 0 - } - } -} -layer { - name: "conv9_2_mbox_loc_perm" - type: "Permute" - bottom: "conv9_2_mbox_loc" - top: "conv9_2_mbox_loc_perm" - permute_param { - order: 0 - order: 2 - order: 3 - order: 1 - } -} -layer { - name: "conv9_2_mbox_loc_flat" - type: "Flatten" - bottom: "conv9_2_mbox_loc_perm" - top: "conv9_2_mbox_loc_flat" - flatten_param { - axis: 1 - } -} -layer { - name: "conv9_2_mbox_conf" - type: "Convolution" - bottom: "conv9_2_h" - top: "conv9_2_mbox_conf" - param { - lr_mult: 1 - decay_mult: 1 - } - param { - lr_mult: 2 - decay_mult: 0 - } - convolution_param { - 
num_output: 8 # 84 - pad: 1 - kernel_size: 3 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - value: 0 - } - } -} -layer { - name: "conv9_2_mbox_conf_perm" - type: "Permute" - bottom: "conv9_2_mbox_conf" - top: "conv9_2_mbox_conf_perm" - permute_param { - order: 0 - order: 2 - order: 3 - order: 1 - } -} -layer { - name: "conv9_2_mbox_conf_flat" - type: "Flatten" - bottom: "conv9_2_mbox_conf_perm" - top: "conv9_2_mbox_conf_flat" - flatten_param { - axis: 1 - } -} -layer { - name: "conv9_2_mbox_priorbox" - type: "PriorBox" - bottom: "conv9_2_h" - bottom: "data" - top: "conv9_2_mbox_priorbox" - prior_box_param { - min_size: 264.0 - max_size: 315.0 - aspect_ratio: 2 - flip: true - clip: false - variance: 0.1 - variance: 0.1 - variance: 0.2 - variance: 0.2 - step: 300 - offset: 0.5 - } -} -layer { - name: "mbox_loc" - type: "Concat" - bottom: "conv4_3_norm_mbox_loc_flat" - bottom: "fc7_mbox_loc_flat" - bottom: "conv6_2_mbox_loc_flat" - bottom: "conv7_2_mbox_loc_flat" - bottom: "conv8_2_mbox_loc_flat" - bottom: "conv9_2_mbox_loc_flat" - top: "mbox_loc" - concat_param { - axis: 1 - } -} -layer { - name: "mbox_conf" - type: "Concat" - bottom: "conv4_3_norm_mbox_conf_flat" - bottom: "fc7_mbox_conf_flat" - bottom: "conv6_2_mbox_conf_flat" - bottom: "conv7_2_mbox_conf_flat" - bottom: "conv8_2_mbox_conf_flat" - bottom: "conv9_2_mbox_conf_flat" - top: "mbox_conf" - concat_param { - axis: 1 - } -} -layer { - name: "mbox_priorbox" - type: "Concat" - bottom: "conv4_3_norm_mbox_priorbox" - bottom: "fc7_mbox_priorbox" - bottom: "conv6_2_mbox_priorbox" - bottom: "conv7_2_mbox_priorbox" - bottom: "conv8_2_mbox_priorbox" - bottom: "conv9_2_mbox_priorbox" - top: "mbox_priorbox" - concat_param { - axis: 2 - } -} - -layer { - name: "mbox_conf_reshape" - type: "Reshape" - bottom: "mbox_conf" - top: "mbox_conf_reshape" - reshape_param { - shape { - dim: 0 - dim: -1 - dim: 2 - } - } -} -layer { - name: "mbox_conf_softmax" - type: "Softmax" - bottom: "mbox_conf_reshape" - top: "mbox_conf_softmax" - softmax_param { - axis: 2 - } -} -layer { - name: "mbox_conf_flatten" - type: "Flatten" - bottom: "mbox_conf_softmax" - top: "mbox_conf_flatten" - flatten_param { - axis: 1 - } -} - -layer { - name: "detection_out" - type: "DetectionOutput" - bottom: "mbox_loc" - bottom: "mbox_conf_flatten" - bottom: "mbox_priorbox" - top: "detection_out" - include { - phase: TEST - } - detection_output_param { - num_classes: 2 - share_location: true - background_label_id: 0 - nms_param { - nms_threshold: 0.45 - top_k: 400 - } - code_type: CENTER_SIZE - keep_top_k: 200 - confidence_threshold: 0.01 - } -} \ No newline at end of file diff --git a/legacy/face_detection/caffe_model/res10_300x300_ssd_iter_140000.caffemodel b/legacy/face_detection/caffe_model/res10_300x300_ssd_iter_140000.caffemodel deleted file mode 100644 index 809dfd787..000000000 Binary files a/legacy/face_detection/caffe_model/res10_300x300_ssd_iter_140000.caffemodel and /dev/null differ diff --git a/legacy/face_detection/nn4/nn4.small2.v1.t7 b/legacy/face_detection/nn4/nn4.small2.v1.t7 deleted file mode 100644 index 8a0d01975..000000000 Binary files a/legacy/face_detection/nn4/nn4.small2.v1.t7 and /dev/null differ diff --git a/legacy/face_detection/package.xml b/legacy/face_detection/package.xml deleted file mode 100644 index bfb1b723e..000000000 --- a/legacy/face_detection/package.xml +++ /dev/null @@ -1,66 +0,0 @@ - - - face_detection - 0.0.0 - The face_detection package - - - - - nicole - - - - - - TODO - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - - - - - - catkin - rospy - std_msgs - rospy - std_msgs - rospy - std_msgs - lasr_vision_msgs - - - - - - - - \ No newline at end of file diff --git a/legacy/face_detection/setup.py b/legacy/face_detection/setup.py deleted file mode 100644 index 2ff4f999c..000000000 --- a/legacy/face_detection/setup.py +++ /dev/null @@ -1,11 +0,0 @@ -#!/usr/bin/env python3 - -from distutils.core import setup -from catkin_pkg.python_setup import generate_distutils_setup - -setup_args = generate_distutils_setup( - packages=['face_detection'], - package_dir={'': 'src'} -) - -setup(**setup_args) \ No newline at end of file diff --git a/legacy/face_detection/src/face_detection/face_detection_server.py b/legacy/face_detection/src/face_detection/face_detection_server.py deleted file mode 100755 index 3b9ea318b..000000000 --- a/legacy/face_detection/src/face_detection/face_detection_server.py +++ /dev/null @@ -1,168 +0,0 @@ -#!/usr/bin/env python3 -from genericpath import exists -import rospy -import rospkg - -from face_detection.srv import FaceDetection, FaceDetectionResponse, \ - FaceDetectionRequest -from lasr_vision_msgs.msg import Detection -import os -import numpy as np -import cv2 -import pickle -import imutils -from cv_bridge3 import CvBridge - -NET = os.path.join(rospkg.RosPack().get_path('face_detection'),'nn4', "nn4.small2.v1.t7") -PROTO_PATH = os.path.join(rospkg.RosPack().get_path('face_detection'), 'caffe_model', "deploy.prototxt") -MODEL_PATH = os.path.join(rospkg.RosPack().get_path('face_detection'), 'caffe_model', - "res10_300x300_ssd_iter_140000.caffemodel") -OUTPUT_PATH = os.path.join(rospkg.RosPack().get_path('face_detection'), "output") - -CONFIDENCE_THRESHOLD = 0.5 - -# -# def delete_model(): -# if os.path.exists(os.path.join(OUTPUT_PATH, "recognizer.pickle")): -# os.path.remove(os.path.join(OUTPUT_PATH, "recognizer.pickle")) -# if os.path.exists(os.path.join(OUTPUT_PATH, "le.pickle")): -# os.path.remove(os.path.join(OUTPUT_PATH, "le.pickle")) - - -class FaceDetectionServer: - """ - A Server for performing face detection and classification with OpenCV. - """ - - def __init__(self): - - # Load network and models. - self.detector = cv2.dnn.readNetFromCaffe(PROTO_PATH, MODEL_PATH) - self.embedder = cv2.dnn.readNetFromTorch(NET) - - # Bridge for conversion between cv2 and sensor_msgs/Image, and vice versa. - self.bridge = CvBridge() - # delete_model() - self.recognizer = None - self.le = None - - def load_model(self): - if not os.path.exists(os.path.join(OUTPUT_PATH, "recognizer.pickle")): - self.recognizer = None - else: - with open(os.path.join(OUTPUT_PATH, "recognizer.pickle"), "rb") as fp: - self.recognizer = pickle.loads(fp.read()) - if not os.path.exists(os.path.join(OUTPUT_PATH, "le.pickle")): - self.le = None - else: - with open(os.path.join(OUTPUT_PATH, "le.pickle"), "rb") as fp: - self.le = pickle.loads(fp.read()) - print(self.le.classes_) - - if self.recognizer and self.le: - return True - - def __call__(self, req): - """ - Core method of server. - """ - - if not self.recognizer or not self.le: - if not self.load_model(): - raise rospy.ServiceException("No model to load from.") - - # Construct empty response. - if isinstance(req, FaceDetectionRequest): - response = FaceDetectionResponse() - - # Convert sensor_msgs/Image to cv2 image. 
- print('i am here') - cv_image = self.bridge.imgmsg_to_cv2_np(req.image_raw) - - # else: - # response = FaceDetectionPCLResponse() - # - # # Extract rgb image from pointcloud - # cv_image = np.fromstring(req.cloud.data, dtype=np.uint8) - # cv_image = cv_image.reshape(req.cloud.height, req.cloud.width, 32) - # cv_image = cv_image[:, :, 16:19] - # - # # Ensure array is contiguous - # cv_image = np.ascontiguousarray(cv_image, dtype=np.uint8) - - # Resize for input to the network. - cv_image = imutils.resize(cv_image, width=600) - h, w = cv_image.shape[:2] - - # Construct a blob from the image. - blob = cv2.dnn.blobFromImage( - cv2.resize(cv_image, (300, 300)), - 1.0, (300, 300), - (104.0, 177.0, 123.0), - swapRB=False, crop=False - ) - - # Apply OpenCV's deep learning-based face detector to localize - # faces in the input image - self.detector.setInput(blob) - detections = self.detector.forward() - - print(detections, 'the face detections') - - # Iterate detections - for detection in detections[0][0]: - - # Extract confidence. - confidence = detection[2] - - # Ensure confidence of detection is above specified threshold. - if confidence > CONFIDENCE_THRESHOLD: - - # Compute bounding box coordinates. - face_bb = detection[3:7] * np.array([w, h, w, h]) - x1, y1, x2, y2 = face_bb.astype("int") - - # Extract face. - face = cv_image[y1:y2, x1:x2] - - face_width, face_height = face.shape[:2] - - # Ensure face width and height are sufficiently large. - if face_width < 20 or face_height < 20: continue - - # Construct a blob from the face. - faceBlob = cv2.dnn.blobFromImage( - face, 1.0 / 255, (96, 96), - (0, 0, 0), - swapRB=True, crop=False - ) - - # Pass blob through face embedding model - # to obtain the 128-d quantification of the face. - self.embedder.setInput(faceBlob) - vec = self.embedder.forward() - - # Perform classification to recognise the face. - predictions = self.recognizer.predict_proba(vec)[0] - j = np.argmax(predictions) - prob = predictions[j] - name = self.le.classes_[j] - print(name, prob, 'hello there') - - # Append the detection to the response. 
- if isinstance(req, FaceDetectionRequest): - response.detected_objects.append(Detection(name, prob, [x1, y1, x2, y2])) - # else: - # centroid = bb_to_centroid(req.cloud, x1, y1, x2 - x1, y2 - y1) - # response.detections.append(DetectionPCL(name, prob, [x1, y1, x2, y2], centroid)) - # print('the resp is ', response) - return response - - -if __name__ == "__main__": - rospy.init_node("face_detection_server") - server = FaceDetectionServer() - service = rospy.Service('face_detection_server', FaceDetection, server) - # service_pcl = rospy.Service('face_detection_pcl', FaceDetectionPCL, server) - rospy.loginfo("Face Detection Service initialised") - rospy.spin() \ No newline at end of file diff --git a/legacy/face_detection/srv/FaceDetection.srv b/legacy/face_detection/srv/FaceDetection.srv deleted file mode 100755 index 52b8d5ce6..000000000 --- a/legacy/face_detection/srv/FaceDetection.srv +++ /dev/null @@ -1,3 +0,0 @@ -sensor_msgs/Image image_raw ---- -lasr_vision_msgs/Detection[] detected_objects diff --git a/legacy/face_detection/srv/FaceDetectionPCL.srv b/legacy/face_detection/srv/FaceDetectionPCL.srv deleted file mode 100644 index 4a3b5dde2..000000000 --- a/legacy/face_detection/srv/FaceDetectionPCL.srv +++ /dev/null @@ -1,3 +0,0 @@ -sensor_msgs/PointCloud2 cloud ---- -face_detection/DetectionPCL[] detections \ No newline at end of file diff --git a/legacy/find_person_and_ask_open_door/CMakeLists.txt b/legacy/find_person_and_ask_open_door/CMakeLists.txt deleted file mode 100644 index c42d8426d..000000000 --- a/legacy/find_person_and_ask_open_door/CMakeLists.txt +++ /dev/null @@ -1,204 +0,0 @@ -cmake_minimum_required(VERSION 3.0.2) -project(find_person_and_ask_open_door) - -## Compile as C++11, supported in ROS Kinetic and newer -# add_compile_options(-std=c++11) - -## Find catkin macros and libraries -## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz) -## is used, also find other catkin packages -find_package(catkin REQUIRED COMPONENTS - rospy -) - -## System dependencies are found with CMake's conventions -# find_package(Boost REQUIRED COMPONENTS system) - - -## Uncomment this if the package has a setup.py. This macro ensures -## modules and global scripts declared therein get installed -## See http://ros.org/doc/api/catkin/html/user_guide/setup_dot_py.html -catkin_python_setup() - -################################################ -## Declare ROS messages, services and actions ## -################################################ - -## To declare and build messages, services or actions from within this -## package, follow these steps: -## * Let MSG_DEP_SET be the set of packages whose message types you use in -## your messages/services/actions (e.g. std_msgs, actionlib_msgs, ...). -## * In the file package.xml: -## * add a build_depend tag for "message_generation" -## * add a build_depend and a exec_depend tag for each package in MSG_DEP_SET -## * If MSG_DEP_SET isn't empty the following dependency has been pulled in -## but can be declared for certainty nonetheless: -## * add a exec_depend tag for "message_runtime" -## * In this file (CMakeLists.txt): -## * add "message_generation" and every package in MSG_DEP_SET to -## find_package(catkin REQUIRED COMPONENTS ...) -## * add "message_runtime" and every package in MSG_DEP_SET to -## catkin_package(CATKIN_DEPENDS ...) 
-## * uncomment the add_*_files sections below as needed -## and list every .msg/.srv/.action file to be processed -## * uncomment the generate_messages entry below -## * add every package in MSG_DEP_SET to generate_messages(DEPENDENCIES ...) - -## Generate messages in the 'msg' folder -# add_message_files( -# FILES -# Message1.msg -# Message2.msg -# ) - -## Generate services in the 'srv' folder -# add_service_files( -# FILES -# Service1.srv -# Service2.srv -# ) - -## Generate actions in the 'action' folder -# add_action_files( -# FILES -# Action1.action -# Action2.action -# ) - -## Generate added messages and services with any dependencies listed here -# generate_messages( -# DEPENDENCIES -# std_msgs # Or other packages containing msgs -# ) - -################################################ -## Declare ROS dynamic reconfigure parameters ## -################################################ - -## To declare and build dynamic reconfigure parameters within this -## package, follow these steps: -## * In the file package.xml: -## * add a build_depend and a exec_depend tag for "dynamic_reconfigure" -## * In this file (CMakeLists.txt): -## * add "dynamic_reconfigure" to -## find_package(catkin REQUIRED COMPONENTS ...) -## * uncomment the "generate_dynamic_reconfigure_options" section below -## and list every .cfg file to be processed - -## Generate dynamic reconfigure parameters in the 'cfg' folder -# generate_dynamic_reconfigure_options( -# cfg/DynReconf1.cfg -# cfg/DynReconf2.cfg -# ) - -################################### -## catkin specific configuration ## -################################### -## The catkin_package macro generates cmake config files for your package -## Declare things to be passed to dependent projects -## INCLUDE_DIRS: uncomment this if your package contains header files -## LIBRARIES: libraries you create in this project that dependent projects also need -## CATKIN_DEPENDS: catkin_packages dependent projects also need -## DEPENDS: system dependencies of this project that dependent projects also need -catkin_package( -# INCLUDE_DIRS include -# LIBRARIES find_person_and_ask_open_door -# CATKIN_DEPENDS rospy -# DEPENDS system_lib -) - -########### -## Build ## -########### - -## Specify additional locations of header files -## Your package locations should be listed before other locations -include_directories( -# include - ${catkin_INCLUDE_DIRS} -) - -## Declare a C++ library -# add_library(${PROJECT_NAME} -# src/${PROJECT_NAME}/find_person_and_ask_open_door.cpp -# ) - -## Add cmake target dependencies of the library -## as an example, code may need to be generated before libraries -## either from message generation or dynamic reconfigure -# add_dependencies(${PROJECT_NAME} ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS}) - -## Declare a C++ executable -## With catkin_make all packages are built within a single CMake context -## The recommended prefix ensures that target names across packages don't collide -# add_executable(${PROJECT_NAME}_node src/find_person_and_ask_open_door_node.cpp) - -## Rename C++ executable without prefix -## The above recommended prefix causes long target names, the following renames the -## target back to the shorter version for ease of user use -## e.g. 
"rosrun someones_pkg node" instead of "rosrun someones_pkg someones_pkg_node" -# set_target_properties(${PROJECT_NAME}_node PROPERTIES OUTPUT_NAME node PREFIX "") - -## Add cmake target dependencies of the executable -## same as for the library above -# add_dependencies(${PROJECT_NAME}_node ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS}) - -## Specify libraries to link a library or executable target against -# target_link_libraries(${PROJECT_NAME}_node -# ${catkin_LIBRARIES} -# ) - -############# -## Install ## -############# - -# all install targets should use catkin DESTINATION variables -# See http://ros.org/doc/api/catkin/html/adv_user_guide/variables.html - -## Mark executable scripts (Python etc.) for installation -## in contrast to setup.py, you can choose the destination -# catkin_install_python(PROGRAMS -# scripts/my_python_script -# DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} -# ) - -## Mark executables for installation -## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_executables.html -# install(TARGETS ${PROJECT_NAME}_node -# RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} -# ) - -## Mark libraries for installation -## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_libraries.html -# install(TARGETS ${PROJECT_NAME} -# ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} -# LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} -# RUNTIME DESTINATION ${CATKIN_GLOBAL_BIN_DESTINATION} -# ) - -## Mark cpp header files for installation -# install(DIRECTORY include/${PROJECT_NAME}/ -# DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION} -# FILES_MATCHING PATTERN "*.h" -# PATTERN ".svn" EXCLUDE -# ) - -## Mark other files for installation (e.g. launch and bag files, etc.) -# install(FILES -# # myfile1 -# # myfile2 -# DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION} -# ) - -############# -## Testing ## -############# - -## Add gtest based cpp test target and link libraries -# catkin_add_gtest(${PROJECT_NAME}-test test/test_find_person_and_ask_open_door.cpp) -# if(TARGET ${PROJECT_NAME}-test) -# target_link_libraries(${PROJECT_NAME}-test ${PROJECT_NAME}) -# endif() - -## Add folders to be run by python nosetests -# catkin_add_nosetests(test) diff --git a/legacy/find_person_and_ask_open_door/README.md b/legacy/find_person_and_ask_open_door/README.md deleted file mode 100644 index 1a73ef83d..000000000 --- a/legacy/find_person_and_ask_open_door/README.md +++ /dev/null @@ -1,49 +0,0 @@ -# find_person_and_ask_open_door - -Goes to a specified door and searches for a person. When person is found, ask person - to open the door. - -This package is maintained by: -- [elisabeth](mailto:elisabeth@todo.todo) - -## Prerequisites - -This package depends on the following ROS packages: -- catkin (buildtool) -- rospy (build) -- rospy (exec) - -Ask the package maintainer to write or create a blank `doc/PREREQUISITES.md` for their package! - -## Usage - -Ask the package maintainer to write a `doc/USAGE.md` for their package! - -## Example - -Ask the package maintainer to write a `doc/EXAMPLE.md` for their package! - -## Technical Overview - -Ask the package maintainer to write a `doc/TECHNICAL.md` for their package! - -## ROS Definitions - -### Launch Files - -#### `find_person_and_ask` - -No description provided. - - -### Messages - -This package has no messages. - -### Services - -This package has no services. - -### Actions - -This package has no actions. 
diff --git a/legacy/find_person_and_ask_open_door/config/map_points.yaml b/legacy/find_person_and_ask_open_door/config/map_points.yaml deleted file mode 100644 index c0f2744be..000000000 --- a/legacy/find_person_and_ask_open_door/config/map_points.yaml +++ /dev/null @@ -1,22 +0,0 @@ -door_simu: - position: - x: 6.133732680056999 - y: -0.7289013393838328 - z: 0.0 - orientation: - x: 0.0 - y: 0.0 - z: 0.028572161371893147 - w: 0.9995917324560756 - -door: - position: - x: -0.8110280734845722 - y: -1.7996335500178748 - z: 0.0 - orientation: - x: 0.0 - y: 0.0 - z: -0.9671729273733352 - w: 0.2541191227674403 - diff --git a/legacy/find_person_and_ask_open_door/launch/find_person_and_ask.launch b/legacy/find_person_and_ask_open_door/launch/find_person_and_ask.launch deleted file mode 100644 index 0ace32add..000000000 --- a/legacy/find_person_and_ask_open_door/launch/find_person_and_ask.launch +++ /dev/null @@ -1,6 +0,0 @@ - - - - - - \ No newline at end of file diff --git a/legacy/find_person_and_ask_open_door/package.xml b/legacy/find_person_and_ask_open_door/package.xml deleted file mode 100644 index f9528aaa7..000000000 --- a/legacy/find_person_and_ask_open_door/package.xml +++ /dev/null @@ -1,63 +0,0 @@ - - - find_person_and_ask_open_door - 0.0.0 - Goes to a specified door and searches for a person. When person is found, ask person - to open the door. - - - - - elisabeth - - - - - - TODO - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - catkin - rospy - rospy - rospy - - - - - - - - \ No newline at end of file diff --git a/legacy/find_person_and_ask_open_door/setup.py b/legacy/find_person_and_ask_open_door/setup.py deleted file mode 100644 index 71fec1ae6..000000000 --- a/legacy/find_person_and_ask_open_door/setup.py +++ /dev/null @@ -1,11 +0,0 @@ -#!/usr/bin/env python3 - -from distutils.core import setup -from catkin_pkg.python_setup import generate_distutils_setup - -setup_args = generate_distutils_setup( - packages=['find_person_and_ask_open_door'], - package_dir={'': 'src'} -) - -setup(**setup_args) \ No newline at end of file diff --git a/legacy/find_person_and_ask_open_door/src/find_person_and_ask_open_door/find_person_and_ask.py b/legacy/find_person_and_ask_open_door/src/find_person_and_ask_open_door/find_person_and_ask.py deleted file mode 100755 index 7b1ab114f..000000000 --- a/legacy/find_person_and_ask_open_door/src/find_person_and_ask_open_door/find_person_and_ask.py +++ /dev/null @@ -1,76 +0,0 @@ -#!/usr/bin/env python3 -import rospy -from tiago_controllers.controllers.base_controller import CmdVelController -# from tiago_controllers.head_controller import HeadController -from tiago_controllers.controllers.look_at import LookAt -# from lasr_voice.voice import Voice -from tiago_controllers.helpers import get_pose_from_param -from lasr_object_detection_yolo.detect_objects import detect_objects -from tiago_controllers.controllers import Controllers - -# TODO: make statemachine -# TODO: add simulation:= true/false in launch file - -HORIZONTAL = 0.8 -VERTICAL = 0.3 - - -class FindPersonAndAsk: - def __init__(self): - self.controllers = Controllers() - self.base_controller = self.controllers.base_controller - self.head_controller = self.controllers.head_controller - self.cmd_vel_controller = CmdVelController() - - self.head_controller.sync_reach_to(0, 0) # start by centering the head - # self.voice = Voice() - self.search_points = [(-1 * HORIZONTAL, VERTICAL), - (0, VERTICAL), - (HORIZONTAL, VERTICAL), - (HORIZONTAL, 0), - (0, 0), - (-1 * HORIZONTAL, 0), - (-1 * 
HORIZONTAL, -1 * VERTICAL), - (0, -1 * VERTICAL), - (HORIZONTAL, -1 * VERTICAL), - (0, 0)] - - def search_for_person(self): - for point in self.search_points: - self.head_controller.sync_reach_to(point[0], point[1]) - people = detect_objects(["person"], "coco") - if people: - return people[0], point[0] - return None - - # ! State machine 1: go to place - # ! State machine 2: search - # ! State machine 3: turn - # ! State machine 4: talk - def main(self): - # door_pos = get_pose_from_param('/door_simu') - # door_pos = get_pose_from_param('/door') - # self.base_controller.sync_to_pose(door_pos) - cmd_vel = CmdVelController() - turns = 3 - for i in range(turns): - try: - closest_person, head_rot = self.search_for_person() - if closest_person: - print("FOUND PERSON") - look_at = LookAt(self.head_controller, self.base_controller, self.cmd_vel_controller, "person") - look_at.look_at(closest_person.xywh, head_rot) - print("CAN YOU PLEASE OPEN THE DOOR") - self.voice.sync_tts("Can you please open the door for me?") - return - except TypeError: - cmd_vel.rotate(60, 360/turns, True) - - print("I CANT SEE ANYONE") - self.voice.sync_tts("I can't see anyone!") - - -if __name__ == '__main__': - rospy.init_node("find_person_and_ask_open_door_node", anonymous=True) - find_and_ask = FindPersonAndAsk() - find_and_ask.main() diff --git a/legacy/face_detection/src/face_detection/__init__.py b/legacy/graph_room_navigation/CATKIN_IGNORE similarity index 100% rename from legacy/face_detection/src/face_detection/__init__.py rename to legacy/graph_room_navigation/CATKIN_IGNORE diff --git a/legacy/graph_room_navigation/package.xml b/legacy/graph_room_navigation/package.xml index 9fb8f1a0b..0e58bf540 100644 --- a/legacy/graph_room_navigation/package.xml +++ b/legacy/graph_room_navigation/package.xml @@ -7,7 +7,7 @@ - jared + Jared Swift diff --git a/legacy/interaction_module/CMakeLists.txt b/legacy/interaction_module/CMakeLists.txt deleted file mode 100644 index 116462ac9..000000000 --- a/legacy/interaction_module/CMakeLists.txt +++ /dev/null @@ -1,207 +0,0 @@ -cmake_minimum_required(VERSION 3.0.2) -project(interaction_module) - -## Compile as C++11, supported in ROS Kinetic and newer -# add_compile_options(-std=c++11) - -## Find catkin macros and libraries -## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz) -## is used, also find other catkin packages -find_package(catkin REQUIRED COMPONENTS - geometry_msgs - roscpp - rospy - std_msgs - message_generation -) - -## System dependencies are found with CMake's conventions -# find_package(Boost REQUIRED COMPONENTS system) - - -## Uncomment this if the package has a setup.py. This macro ensures -## modules and global scripts declared therein get installed -## See http://ros.org/doc/api/catkin/html/user_guide/setup_dot_py.html -catkin_python_setup() - -################################################ -## Declare ROS messages, services and actions ## -################################################ - -## To declare and build messages, services or actions from within this -## package, follow these steps: -## * Let MSG_DEP_SET be the set of packages whose message types you use in -## your messages/services/actions (e.g. std_msgs, actionlib_msgs, ...). 
-## * In the file package.xml: -## * add a build_depend tag for "message_generation" -## * add a build_depend and a exec_depend tag for each package in MSG_DEP_SET -## * If MSG_DEP_SET isn't empty the following dependency has been pulled in -## but can be declared for certainty nonetheless: -## * add a exec_depend tag for "message_runtime" -## * In this file (CMakeLists.txt): -## * add "message_generation" and every package in MSG_DEP_SET to -## find_package(catkin REQUIRED COMPONENTS ...) -## * add "message_runtime" and every package in MSG_DEP_SET to -## catkin_package(CATKIN_DEPENDS ...) -## * uncomment the add_*_files sections below as needed -## and list every .msg/.srv/.action file to be processed -## * uncomment the generate_messages entry below -## * add every package in MSG_DEP_SET to generate_messages(DEPENDENCIES ...) - -## Generate messages in the 'msg' folder -# add_message_files( -# FILES -# Message1.msg -# Message2.msg -# ) - -## Generate services in the 'srv' folder - add_service_files( - FILES - AudioAndTextInteraction.srv -) - -## Generate actions in the 'action' folder -# add_action_files( -# FILES -# Action1.action -# Action2.action -# ) - -## Generate added messages and services with any dependencies listed here - generate_messages( - DEPENDENCIES - std_msgs - ) - -################################################ -## Declare ROS dynamic reconfigure parameters ## -################################################ - -## To declare and build dynamic reconfigure parameters within this -## package, follow these steps: -## * In the file package.xml: -## * add a build_depend and a exec_depend tag for "dynamic_reconfigure" -## * In this file (CMakeLists.txt): -## * add "dynamic_reconfigure" to -## find_package(catkin REQUIRED COMPONENTS ...) 
-## * uncomment the "generate_dynamic_reconfigure_options" section below -## and list every .cfg file to be processed - -## Generate dynamic reconfigure parameters in the 'cfg' folder -# generate_dynamic_reconfigure_options( -# cfg/DynReconf1.cfg -# cfg/DynReconf2.cfg -# ) - -################################### -## catkin specific configuration ## -################################### -## The catkin_package macro generates cmake config files for your package -## Declare things to be passed to dependent projects -## INCLUDE_DIRS: uncomment this if your package contains header files -## LIBRARIES: libraries you create in this project that dependent projects also need -## CATKIN_DEPENDS: catkin_packages dependent projects also need -## DEPENDS: system dependencies of this project that dependent projects also need -catkin_package( -# INCLUDE_DIRS include -# LIBRARIES interaction_module -# CATKIN_DEPENDS geometry_msgs roscpp rospy std_msgs -# DEPENDS system_lib -) - -########### -## Build ## -########### - -## Specify additional locations of header files -## Your package locations should be listed before other locations -include_directories( -# include - ${catkin_INCLUDE_DIRS} -) - -## Declare a C++ library -# add_library(${PROJECT_NAME} -# src/${PROJECT_NAME}/interaction_module.cpp -# ) - -## Add cmake target dependencies of the library -## as an example, code may need to be generated before libraries -## either from message generation or dynamic reconfigure -# add_dependencies(${PROJECT_NAME} ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS}) - -## Declare a C++ executable -## With catkin_make all packages are built within a single CMake context -## The recommended prefix ensures that target names across packages don't collide -# add_executable(${PROJECT_NAME}_node src/interaction_module_node.cpp) - -## Rename C++ executable without prefix -## The above recommended prefix causes long target names, the following renames the -## target back to the shorter version for ease of user use -## e.g. "rosrun someones_pkg node" instead of "rosrun someones_pkg someones_pkg_node" -# set_target_properties(${PROJECT_NAME}_node PROPERTIES OUTPUT_NAME node PREFIX "") - -## Add cmake target dependencies of the executable -## same as for the library above -# add_dependencies(${PROJECT_NAME}_node ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS}) - -## Specify libraries to link a library or executable target against -# target_link_libraries(${PROJECT_NAME}_node -# ${catkin_LIBRARIES} -# ) - -############# -## Install ## -############# - -# all install targets should use catkin DESTINATION variables -# See http://ros.org/doc/api/catkin/html/adv_user_guide/variables.html - -## Mark executable scripts (Python etc.) 
for installation -## in contrast to setup.py, you can choose the destination -# catkin_install_python(PROGRAMS -# scripts/my_python_script -# DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} -# ) - -## Mark executables for installation -## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_executables.html -# install(TARGETS ${PROJECT_NAME}_node -# RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} -# ) - -## Mark libraries for installation -## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_libraries.html -# install(TARGETS ${PROJECT_NAME} -# ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} -# LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} -# RUNTIME DESTINATION ${CATKIN_GLOBAL_BIN_DESTINATION} -# ) - -## Mark cpp header files for installation -# install(DIRECTORY include/${PROJECT_NAME}/ -# DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION} -# FILES_MATCHING PATTERN "*.h" -# PATTERN ".svn" EXCLUDE -# ) - -## Mark other files for installation (e.g. launch and bag files, etc.) -# install(FILES -# # myfile1 -# # myfile2 -# DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION} -# ) - -############# -## Testing ## -############# - -## Add gtest based cpp test target and link libraries -# catkin_add_gtest(${PROJECT_NAME}-test test/test_interaction_module.cpp) -# if(TARGET ${PROJECT_NAME}-test) -# target_link_libraries(${PROJECT_NAME}-test ${PROJECT_NAME}) -# endif() - -## Add folders to be run by python nosetests -# catkin_add_nosetests(test) diff --git a/legacy/interaction_module/README.md b/legacy/interaction_module/README.md deleted file mode 100644 index 36aa36cbc..000000000 --- a/legacy/interaction_module/README.md +++ /dev/null @@ -1,70 +0,0 @@ -# interaction_module - -The interaction_module package - -This package is maintained by: -- [nicole](mailto:nicole@todo.todo) - -## Prerequisites - -This package depends on the following ROS packages: -- catkin (buildtool) -- geometry_msgs (build) -- roscpp (build) -- rospy (build) -- std_msgs (build) -- geometry_msgs (exec) -- roscpp (exec) -- rospy (exec) -- std_msgs (exec) - -Ask the package maintainer to write or create a blank `doc/PREREQUISITES.md` for their package! - -## Usage - -Ask the package maintainer to write a `doc/USAGE.md` for their package! - -## Example - -Ask the package maintainer to write a `doc/EXAMPLE.md` for their package! - -## Technical Overview - -Ask the package maintainer to write a `doc/TECHNICAL.md` for their package! - -## ROS Definitions - -### Launch Files - -#### `interaction` - -No description provided. - - -### Messages - -This package has no messages. - -### Services - -#### `AudioAndTextInteraction` - -Request - -| Field | Type | Description | -|:-:|:-:|---| -| action | string | | -| subaction | string | | -| query_text | string | | - -Response - -| Field | Type | Description | -|:-:|:-:|---| -| status | bool | | -| result | string | | - - -### Actions - -This package has no actions. 
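The AudioAndTextInteraction service documented above is easiest to read from the caller's side. Below is a minimal client sketch, modelled on the package's own test_interaction.py further down in this diff; it assumes the (now removed) interaction_module package is built and its /interaction_module server is running.

    #!/usr/bin/env python3
    # Sketch of a caller for the AudioAndTextInteraction service.
    import rospy
    from interaction_module.srv import AudioAndTextInteraction

    rospy.init_node("interaction_module_client")
    rospy.wait_for_service("/interaction_module")
    speech = rospy.ServiceProxy("/interaction_module", AudioAndTextInteraction)
    # Fields are plain strings: action, subaction, query_text.
    resp = speech("ROOM_REQUEST", "ask_location", "Can you show me where the 609 office is")
    rospy.loginfo("status=%s result=%s", resp.status, resp.result)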
diff --git a/legacy/interaction_module/launch/interaction.launch b/legacy/interaction_module/launch/interaction.launch deleted file mode 100644 index cb166d55e..000000000 --- a/legacy/interaction_module/launch/interaction.launch +++ /dev/null @@ -1,4 +0,0 @@ - - - - \ No newline at end of file diff --git a/legacy/interaction_module/package.xml b/legacy/interaction_module/package.xml deleted file mode 100644 index 60c8ddc32..000000000 --- a/legacy/interaction_module/package.xml +++ /dev/null @@ -1,71 +0,0 @@ - - - interaction_module - 0.0.0 - The interaction_module package - - - - - nicole - - - - - - TODO - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - catkin - geometry_msgs - roscpp - rospy - std_msgs - geometry_msgs - roscpp - rospy - std_msgs - geometry_msgs - roscpp - rospy - std_msgs - - - - - - - - diff --git a/legacy/interaction_module/setup.py b/legacy/interaction_module/setup.py deleted file mode 100644 index 3463b4cd6..000000000 --- a/legacy/interaction_module/setup.py +++ /dev/null @@ -1,12 +0,0 @@ -#!/usr/bin/env python3 - -from distutils.core import setup -from catkin_pkg.python_setup import generate_distutils_setup - -# fetch values from package.xml -setup_args = generate_distutils_setup( - packages=['interaction_module'], - package_dir={'': 'src'} -) - -setup(**setup_args) \ No newline at end of file diff --git a/legacy/interaction_module/src/interaction_module/interaction_module_srv.py b/legacy/interaction_module/src/interaction_module/interaction_module_srv.py deleted file mode 100755 index 3c0e26c52..000000000 --- a/legacy/interaction_module/src/interaction_module/interaction_module_srv.py +++ /dev/null @@ -1,29 +0,0 @@ -#!/usr/bin/env python3 -import rospy -from interaction_module.srv import AudioAndTextInteraction, AudioAndTextInteractionResponse -from listen_module.srv import DialogListen, DialogListenRequest, DialogListenResponse - - -class InteractionModule: - def __init__(self): - self.dialog_srv = rospy.ServiceProxy("/dialogflow_server", DialogListen) - - self.interaction_module_srv = rospy.Service("/interaction_module", AudioAndTextInteraction, self.interaction_module_cb) - - def interaction_module_cb(self, request): - rospy.loginfo("Received audio request") - resp = AudioAndTextInteractionResponse() - print(request.action, request.subaction, request.query_text) - result = self.dialog_srv(request.action, request.subaction, request.query_text) - if result: - resp.status = result.status - resp.result = result.result - # possibly call to talk here - return resp - - -if __name__ == '__main__': - rospy.init_node("interaction_module") - rospy.loginfo("Interaction Module is up and running") - interaction_module = InteractionModule() - rospy.spin() \ No newline at end of file diff --git a/legacy/interaction_module/src/interaction_module/test_interaction.py b/legacy/interaction_module/src/interaction_module/test_interaction.py deleted file mode 100755 index b9ac83198..000000000 --- a/legacy/interaction_module/src/interaction_module/test_interaction.py +++ /dev/null @@ -1,10 +0,0 @@ -#!/usr/bin/env python3 -import rospy - -rospy.init_node("test") - -from interaction_module.srv import AudioAndTextInteraction - -speech = rospy.ServiceProxy("/interaction_module", AudioAndTextInteraction) -# print(speech("ROOM_REQUEST", "ask_location", "Can you show me where the 609 office is")) -print(speech("ROOM_REQUEST", "ask_location", "SOUND:PLAYING:PLEASE")) diff --git a/legacy/interaction_module/srv/AudioAndTextInteraction.srv 
b/legacy/interaction_module/srv/AudioAndTextInteraction.srv deleted file mode 100644 index 8fb8ea81c..000000000 --- a/legacy/interaction_module/srv/AudioAndTextInteraction.srv +++ /dev/null @@ -1,7 +0,0 @@ -string action -string subaction -string query_text ---- -bool status -string result - diff --git a/legacy/lasr_dialogflow/CMakeLists.txt b/legacy/lasr_dialogflow/CMakeLists.txt deleted file mode 100644 index 9eaba0e7e..000000000 --- a/legacy/lasr_dialogflow/CMakeLists.txt +++ /dev/null @@ -1,202 +0,0 @@ -cmake_minimum_required(VERSION 3.0.2) -project(lasr_dialogflow) - -## Compile as C++11, supported in ROS Kinetic and newer -# add_compile_options(-std=c++11) - -## Find catkin macros and libraries -## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz) -## is used, also find other catkin packages -find_package(catkin REQUIRED COMPONENTS message_generation message_runtime rospy std_msgs) - -## System dependencies are found with CMake's conventions -# find_package(Boost REQUIRED COMPONENTS system) - - -## Uncomment this if the package has a setup.py. This macro ensures -## modules and global scripts declared therein get installed -## See http://ros.org/doc/api/catkin/html/user_guide/setup_dot_py.html -catkin_python_setup() - -################################################ -## Declare ROS messages, services and actions ## -################################################ - -## To declare and build messages, services or actions from within this -## package, follow these steps: -## * Let MSG_DEP_SET be the set of packages whose message types you use in -## your messages/services/actions (e.g. std_msgs, actionlib_msgs, ...). -## * In the file package.xml: -## * add a build_depend tag for "message_generation" -## * add a build_depend and a exec_depend tag for each package in MSG_DEP_SET -## * If MSG_DEP_SET isn't empty the following dependency has been pulled in -## but can be declared for certainty nonetheless: -## * add a exec_depend tag for "message_runtime" -## * In this file (CMakeLists.txt): -## * add "message_generation" and every package in MSG_DEP_SET to -## find_package(catkin REQUIRED COMPONENTS ...) -## * add "message_runtime" and every package in MSG_DEP_SET to -## catkin_package(CATKIN_DEPENDS ...) -## * uncomment the add_*_files sections below as needed -## and list every .msg/.srv/.action file to be processed -## * uncomment the generate_messages entry below -## * add every package in MSG_DEP_SET to generate_messages(DEPENDENCIES ...) - -## Generate messages in the 'msg' folder -# add_message_files( -# FILES -# Message1.msg -# Message2.msg -# ) - -## Generate services in the 'srv' folder -add_service_files( - FILES - DialogflowAudio.srv - DialogflowText.srv -) - -## Generate actions in the 'action' folder -# add_action_files( -# FILES -# Action1.action -# Action2.action -# ) - -## Generate added messages and services with any dependencies listed here -generate_messages( - DEPENDENCIES - std_msgs # Or other packages containing msgs -) - -################################################ -## Declare ROS dynamic reconfigure parameters ## -################################################ - -## To declare and build dynamic reconfigure parameters within this -## package, follow these steps: -## * In the file package.xml: -## * add a build_depend and a exec_depend tag for "dynamic_reconfigure" -## * In this file (CMakeLists.txt): -## * add "dynamic_reconfigure" to -## find_package(catkin REQUIRED COMPONENTS ...) 
-## * uncomment the "generate_dynamic_reconfigure_options" section below -## and list every .cfg file to be processed - -## Generate dynamic reconfigure parameters in the 'cfg' folder -# generate_dynamic_reconfigure_options( -# cfg/DynReconf1.cfg -# cfg/DynReconf2.cfg -# ) - -################################### -## catkin specific configuration ## -################################### -## The catkin_package macro generates cmake config files for your package -## Declare things to be passed to dependent projects -## INCLUDE_DIRS: uncomment this if your package contains header files -## LIBRARIES: libraries you create in this project that dependent projects also need -## CATKIN_DEPENDS: catkin_packages dependent projects also need -## DEPENDS: system dependencies of this project that dependent projects also need -catkin_package( -# INCLUDE_DIRS include -# LIBRARIES lasr_dialogflow -# CATKIN_DEPENDS other_catkin_pkg -# DEPENDS system_lib -) - -########### -## Build ## -########### - -## Specify additional locations of header files -## Your package locations should be listed before other locations -include_directories( -# include -# ${catkin_INCLUDE_DIRS} -) - -## Declare a C++ library -# add_library(${PROJECT_NAME} -# src/${PROJECT_NAME}/lasr_dialogflow.cpp -# ) - -## Add cmake target dependencies of the library -## as an example, code may need to be generated before libraries -## either from message generation or dynamic reconfigure -# add_dependencies(${PROJECT_NAME} ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS}) - -## Declare a C++ executable -## With catkin_make all packages are built within a single CMake context -## The recommended prefix ensures that target names across packages don't collide -# add_executable(${PROJECT_NAME}_node src/lasr_dialogflow_node.cpp) - -## Rename C++ executable without prefix -## The above recommended prefix causes long target names, the following renames the -## target back to the shorter version for ease of user use -## e.g. "rosrun someones_pkg node" instead of "rosrun someones_pkg someones_pkg_node" -# set_target_properties(${PROJECT_NAME}_node PROPERTIES OUTPUT_NAME node PREFIX "") - -## Add cmake target dependencies of the executable -## same as for the library above -# add_dependencies(${PROJECT_NAME}_node ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS}) - -## Specify libraries to link a library or executable target against -# target_link_libraries(${PROJECT_NAME}_node -# ${catkin_LIBRARIES} -# ) - -############# -## Install ## -############# - -# all install targets should use catkin DESTINATION variables -# See http://ros.org/doc/api/catkin/html/adv_user_guide/variables.html - -## Mark executable scripts (Python etc.) 
for installation -## in contrast to setup.py, you can choose the destination -# catkin_install_python(PROGRAMS -# scripts/my_python_script -# DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} -# ) - -## Mark executables for installation -## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_executables.html -# install(TARGETS ${PROJECT_NAME}_node -# RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} -# ) - -## Mark libraries for installation -## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_libraries.html -# install(TARGETS ${PROJECT_NAME} -# ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} -# LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} -# RUNTIME DESTINATION ${CATKIN_GLOBAL_BIN_DESTINATION} -# ) - -## Mark cpp header files for installation -# install(DIRECTORY include/${PROJECT_NAME}/ -# DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION} -# FILES_MATCHING PATTERN "*.h" -# PATTERN ".svn" EXCLUDE -# ) - -## Mark other files for installation (e.g. launch and bag files, etc.) -# install(FILES -# # myfile1 -# # myfile2 -# DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION} -# ) - -############# -## Testing ## -############# - -## Add gtest based cpp test target and link libraries -# catkin_add_gtest(${PROJECT_NAME}-test test/test_lasr_dialogflow.cpp) -# if(TARGET ${PROJECT_NAME}-test) -# target_link_libraries(${PROJECT_NAME}-test ${PROJECT_NAME}) -# endif() - -## Add folders to be run by python nosetests -# catkin_add_nosetests(test) diff --git a/legacy/lasr_dialogflow/README.md b/legacy/lasr_dialogflow/README.md deleted file mode 100644 index faf2964d5..000000000 --- a/legacy/lasr_dialogflow/README.md +++ /dev/null @@ -1,75 +0,0 @@ -# lasr_dialogflow - -The lasr_dialogflow package - -This package is maintained by: -- [jared](mailto:jared@todo.todo) - -## Prerequisites - -This package depends on the following ROS packages: -- catkin (buildtool) - -Ask the package maintainer to write or create a blank `doc/PREREQUISITES.md` for their package! - -## Usage - -Ask the package maintainer to write a `doc/USAGE.md` for their package! - -## Example - -Ask the package maintainer to write a `doc/EXAMPLE.md` for their package! - -## Technical Overview - -Ask the package maintainer to write a `doc/TECHNICAL.md` for their package! - -## ROS Definitions - -### Launch Files - -This package has no launch files. - -### Messages - -This package has no messages. - -### Services - -#### `DialogflowAudio` - -Request - -| Field | Type | Description | -|:-:|:-:|---| -| task | std_msgs/String | | -| action | std_msgs/String | | - -Response - -| Field | Type | Description | -|:-:|:-:|---| -| result | std_msgs/String | | -| success | std_msgs/Bool | | - -#### `DialogflowText` - -Request - -| Field | Type | Description | -|:-:|:-:|---| -| task | std_msgs/String | | -| action | std_msgs/String | | -| query_text | std_msgs/String | | - -Response - -| Field | Type | Description | -|:-:|:-:|---| -| result | std_msgs/String | | -| success | std_msgs/Bool | | - - -### Actions - -This package has no actions. 
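Note that, unlike AudioAndTextInteraction, every field of the DialogflowText service above is a std_msgs type, so callers must wrap plain strings in std_msgs/String and unwrap the response via .data. A minimal client sketch follows, assuming the (now removed) lasr_dialogflow package is built and its dialogflow_server.py is running; the task and action names come from the server's config dictionary shown later in this diff.

    #!/usr/bin/env python3
    # Sketch of a DialogflowText caller against the deleted server.
    import rospy
    from std_msgs.msg import String
    from lasr_dialogflow.srv import DialogflowText

    rospy.init_node("dialogflow_text_client")
    rospy.wait_for_service("dialogflow_server/process_text")
    process_text = rospy.ServiceProxy("dialogflow_server/process_text", DialogflowText)
    # "robocup_receptionist" and "ask_name" are entries in the server's config.
    resp = process_text(String("robocup_receptionist"), String("ask_name"),
                        String("my name is Jared"))
    rospy.loginfo("success=%s result=%s", resp.success.data, resp.result.data)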
diff --git a/legacy/lasr_dialogflow/setup.py b/legacy/lasr_dialogflow/setup.py deleted file mode 100644 index 3d36d5694..000000000 --- a/legacy/lasr_dialogflow/setup.py +++ /dev/null @@ -1,11 +0,0 @@ -#!/usr/bin/env python3 - -from distutils.core import setup -from catkin_pkg.python_setup import generate_distutils_setup - -setup_args = generate_distutils_setup( - packages=['lasr_dialogflow'], - package_dir={'': 'src'} -) - -setup(**setup_args) \ No newline at end of file diff --git a/legacy/lasr_dialogflow/src/lasr_dialogflow/actions/__init__.py b/legacy/lasr_dialogflow/src/lasr_dialogflow/actions/__init__.py deleted file mode 100644 index aaec110cf..000000000 --- a/legacy/lasr_dialogflow/src/lasr_dialogflow/actions/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -from lasr_dialogflow.actions.base import BaseAction -from lasr_dialogflow.actions.receptionist import ReceptionistAction \ No newline at end of file diff --git a/legacy/lasr_dialogflow/src/lasr_dialogflow/actions/base.py b/legacy/lasr_dialogflow/src/lasr_dialogflow/actions/base.py deleted file mode 100644 index 700b709a0..000000000 --- a/legacy/lasr_dialogflow/src/lasr_dialogflow/actions/base.py +++ /dev/null @@ -1,62 +0,0 @@ -#!/usr/bin/env python3 - -from lasr_dialogflow.dialogflow_client_stream import DialogflowClientStream -import uuid -import rospy -import dialogflow - -class BaseAction(): - - def __init__(self, project_id, df_lang_id="en"): - - self.project_id = project_id - self.df_lang_id = df_lang_id - self.attempts = 0 - self.max_attempts = 3 - self.streaming_client = None - - self.session_id = uuid.uuid4() - - self.streaming_client = DialogflowClientStream( - self.project_id, - self.session_id, - language_code=self.df_lang_id - ) - - def stop(self): - self.streaming_client.stop() - - def listen_in_context(self, context=None): - - query_params = None - if context: - query_params = dialogflow.types.session_pb2.QueryParameters(contexts=[self.get_context(context)]) - - response = None - for response in self.streaming_client.response_generator(query_params): - if response.recognition_result.message_type == dialogflow.enums.StreamingRecognitionResult.MessageType.END_OF_SINGLE_UTTERANCE: - rospy.loginfo("received end of utterance") - self.stop() - - if response and not response.query_result.query_text: - rospy.loginfo("no audio request") - self.attempts += 1 - if self.attempts >= self.max_attempts: - return None - else: - # speak here - return self.listen_in_context(context) - else: - self.attempts = 0 - return response - - def text_in_context(self, text, context=None): - query_params = None - if context: - query_params = dialogflow.types.session_pb2.QueryParameters(contexts=[self.get_context(context)]) - - return self.streaming_client.text_request(text, query_params=query_params) - - def get_context(self, context, lifespan=1): - return dialogflow.types.context_pb2.Context(name=dialogflow.ContextsClient.context_path(self.project_id, self.session_id, context), - lifespan_count=lifespan) \ No newline at end of file diff --git a/legacy/lasr_dialogflow/src/lasr_dialogflow/actions/receptionist.py b/legacy/lasr_dialogflow/src/lasr_dialogflow/actions/receptionist.py deleted file mode 100644 index 71dcf80ec..000000000 --- a/legacy/lasr_dialogflow/src/lasr_dialogflow/actions/receptionist.py +++ /dev/null @@ -1,78 +0,0 @@ -#!/usr/bin/env python3 -import random -import rospy -from lasr_dialogflow.actions import BaseAction -from std_msgs.msg import String -from google.api_core.exceptions import DeadlineExceeded -class ReceptionistAction(BaseAction): - def
__init__(self, project_id, df_lang_id="en"): - super(ReceptionistAction, self).__init__(project_id, df_lang_id=df_lang_id) - - self.name = None - self.name_confirmed = None - self.current_context = None - self.favourite_drink = None - self.max_retries = 3 - - self.actions = { - "ask_name" : self.ask_name, - "ask_favourite_drink" : self.ask_favourite_drink, - "stop" : self.stop, - } - - def ask_name(self, use_mic=False, text=None): - - if not use_mic and text: - print(self.handle_response(self.text_in_context(text, context="GetName"), "GetName")) - return self.handle_response(self.text_in_context(text, context="GetName"), "GetName") - elif use_mic: - self.say_text("What's your name?") - attempt = 0 - while attempt < self.max_retries: - name = self.handle_response(self.listen_in_context("GetName"), "GetName") - if name: - self.say_text(f"Your name is {name}, is that correct?") - correct = self.handle_response(self.listen_in_context("ConfirmCheck"), "ConfirmCheck") - if correct == "yes": - return name - else: - self.say_text("Okay. Can you repeat your name please?") - else: - self.say_text("Sorry, I didn't get that. Can you repeat please?") - attempt += 1 - - return "" - - def ask_favourite_drink(self): - self.say_text("What's your favourite drink?") - - attempt = 0 - while attempt < self.max_retries: - drink = self.handle_response(self.listen_in_context("GetFavouriteDrink"), "GetFavouriteDrink") - if drink: - self.say_text(f"Your favourite drink is {drink}, is that correct?") - correct = self.handle_response(self.listen_in_context("ConfirmCheck"), "ConfirmCheck") - if correct == "yes": - return drink - else: - self.say_text("Okay. Can you repeat your favourite drink please?") - else: - self.say_text("Sorry, I didn't get that. Can you repeat please?") - attempt += 1 - - return "" - - def handle_response(self, response, context): - if not response: - return None - try: - if context == "GetName": - return response.query_result.parameters["given-name"] - elif context == "GetFavouriteDrink": - print(response.query_result) - return response.query_result.parameters["drink-name"] - elif context == "ConfirmCheck": - return response.query_result.parameters["yesno"] - except ValueError: - return None \ No newline at end of file diff --git a/legacy/lasr_dialogflow/src/lasr_dialogflow/dialogflow_client_stream.py b/legacy/lasr_dialogflow/src/lasr_dialogflow/dialogflow_client_stream.py deleted file mode 100644 index 2f442183b..000000000 --- a/legacy/lasr_dialogflow/src/lasr_dialogflow/dialogflow_client_stream.py +++ /dev/null @@ -1,60 +0,0 @@ -#!/usr/bin/env python3 - -import os -import sounddevice -import dialogflow -import rospkg - -CHUNK_SIZE = 4096 - -class DialogflowClientStream(): - - def __init__(self, project_id, session_id, language_code="en", audio_encoding=dialogflow.enums.AudioEncoding.AUDIO_ENCODING_LINEAR_16, sample_rate=None, input_device=None): - - self.input_device = input_device - self.sample_rate = sample_rate - - credentials_path = os.path.join(rospkg.RosPack().get_path('lasr_dialogflow'), 'config', '{}.json'.format(project_id)) - - self.session_client = dialogflow.SessionsClient.from_service_account_file(credentials_path) - self.session_path = self.session_client.session_path(project_id, session_id) - self.audio_config = dialogflow.types.session_pb2.InputAudioConfig( - language_code=language_code, - audio_encoding=audio_encoding, - sample_rate_hertz=sample_rate, - ) - - def response_generator(self, query_params=None): - return
self.session_client.streaming_detect_intent(self.requests_generator(query_params)) - - def requests_generator(self, query_params=None): - self.stop_requested = False - - microphone_stream = sounddevice.InputStream(samplerate=self.sample_rate, device=self.input_device, dtype="int16") # 16-bit samples to match LINEAR16 - microphone_stream.start() - - query_input = dialogflow.types.session_pb2.QueryInput(audio_config=self.audio_config) - - # First request is for config - yield dialogflow.types.session_pb2.StreamingDetectIntentRequest( - session=self.session_path, - query_input=query_input, - query_params=query_params, - single_utterance=True, - ) - - while not self.stop_requested and microphone_stream.active: # .active is a property, not a method - data, _ = microphone_stream.read(CHUNK_SIZE) # read() returns (samples, overflowed) - yield dialogflow.types.session_pb2.StreamingDetectIntentRequest(input_audio=data.tobytes()) - - microphone_stream.stop() - - def text_request(self, text, query_params=None): - text_input = dialogflow.types.session_pb2.TextInput(text=text, language_code="en-GB") - query_input = dialogflow.types.session_pb2.QueryInput(text=text_input) - response = self.session_client.detect_intent(session=self.session_path, query_input=query_input, query_params=query_params) - - return response - - def stop(self): - self.stop_requested = True diff --git a/legacy/lasr_dialogflow/src/lasr_dialogflow/dialogflow_server.py b/legacy/lasr_dialogflow/src/lasr_dialogflow/dialogflow_server.py deleted file mode 100755 index 8653b8a7b..000000000 --- a/legacy/lasr_dialogflow/src/lasr_dialogflow/dialogflow_server.py +++ /dev/null @@ -1,54 +0,0 @@ -#!/usr/bin/env python3 -import rospy -from lasr_dialogflow.srv import DialogflowAudio, DialogflowAudioResponse, DialogflowText, DialogflowTextResponse -from lasr_dialogflow.actions import ReceptionistAction -import json -import os -from std_msgs.msg import Bool, String - -class DialogflowServer(): - - def __init__(self): - - self.config = { - "robocup_receptionist" : { - "project_id" : "robocup-receptionist-evnn", - "cls" : ReceptionistAction - } - } - - self.audio_srv = rospy.Service("dialogflow_server/process_audio", DialogflowAudio, self.process_audio) - self.text_srv = rospy.Service("dialogflow_server/process_text", DialogflowText, self.process_text) - - def process_audio(self, req): - - response = DialogflowAudioResponse() - - project_id = self.config[req.task.data]["project_id"] - task = self.config[req.task.data]["cls"](project_id) - result = task.actions[req.action.data]() - - if result: - response.result = String(result) - response.success = Bool(True) - - return response - - def process_text(self, req): - - response = DialogflowTextResponse() - - project_id = self.config[req.task.data]["project_id"] - task = self.config[req.task.data]["cls"](project_id) - result = task.actions[req.action.data](use_mic=False, text=req.query_text.data) - - if result: - response.result = String(result) - response.success = Bool(True) - - return response - -if __name__ == "__main__": - rospy.init_node("dialogflow_server") - dialogflow_server = DialogflowServer() - rospy.spin() \ No newline at end of file diff --git a/legacy/lasr_dialogflow/srv/DialogflowAudio.srv b/legacy/lasr_dialogflow/srv/DialogflowAudio.srv deleted file mode 100644 index bdca858cf..000000000 --- a/legacy/lasr_dialogflow/srv/DialogflowAudio.srv +++ /dev/null @@ -1,5 +0,0 @@ -std_msgs/String task -std_msgs/String action ---- -std_msgs/String result -std_msgs/Bool success \ No newline at end of file diff --git a/legacy/lasr_dialogflow/srv/DialogflowText.srv b/legacy/lasr_dialogflow/srv/DialogflowText.srv deleted file mode 100644 index 2b9497c61..000000000 ---
a/legacy/lasr_dialogflow/srv/DialogflowText.srv +++ /dev/null @@ -1,6 +0,0 @@ -std_msgs/String task -std_msgs/String action -std_msgs/String query_text ---- -std_msgs/String result -std_msgs/Bool success \ No newline at end of file diff --git a/legacy/lasr_interaction_server/CMakeLists.txt b/legacy/lasr_interaction_server/CMakeLists.txt deleted file mode 100644 index a26dbfad7..000000000 --- a/legacy/lasr_interaction_server/CMakeLists.txt +++ /dev/null @@ -1,202 +0,0 @@ -cmake_minimum_required(VERSION 3.0.2) -project(lasr_interaction_server) - -## Compile as C++11, supported in ROS Kinetic and newer -# add_compile_options(-std=c++11) - -## Find catkin macros and libraries -## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz) -## is used, also find other catkin packages -find_package(catkin REQUIRED COMPONENTS message_generation message_runtime rospy std_msgs) - -## System dependencies are found with CMake's conventions -# find_package(Boost REQUIRED COMPONENTS system) - - -## Uncomment this if the package has a setup.py. This macro ensures -## modules and global scripts declared therein get installed -## See http://ros.org/doc/api/catkin/html/user_guide/setup_dot_py.html -# catkin_python_setup() - -################################################ -## Declare ROS messages, services and actions ## -################################################ - -## To declare and build messages, services or actions from within this -## package, follow these steps: -## * Let MSG_DEP_SET be the set of packages whose message types you use in -## your messages/services/actions (e.g. std_msgs, actionlib_msgs, ...). -## * In the file package.xml: -## * add a build_depend tag for "message_generation" -## * add a build_depend and a exec_depend tag for each package in MSG_DEP_SET -## * If MSG_DEP_SET isn't empty the following dependency has been pulled in -## but can be declared for certainty nonetheless: -## * add a exec_depend tag for "message_runtime" -## * In this file (CMakeLists.txt): -## * add "message_generation" and every package in MSG_DEP_SET to -## find_package(catkin REQUIRED COMPONENTS ...) -## * add "message_runtime" and every package in MSG_DEP_SET to -## catkin_package(CATKIN_DEPENDS ...) -## * uncomment the add_*_files sections below as needed -## and list every .msg/.srv/.action file to be processed -## * uncomment the generate_messages entry below -## * add every package in MSG_DEP_SET to generate_messages(DEPENDENCIES ...) - -## Generate messages in the 'msg' folder -# add_message_files( -# FILES -# Message1.msg -# Message2.msg -# ) - -## Generate services in the 'srv' folder -add_service_files( - FILES - SpeechInteraction.srv - TextInteraction.srv -) - -## Generate actions in the 'action' folder -# add_action_files( -# FILES -# Action1.action -# Action2.action -# ) - -## Generate added messages and services with any dependencies listed here -generate_messages( - DEPENDENCIES - std_msgs # Or other packages containing msgs -) - -################################################ -## Declare ROS dynamic reconfigure parameters ## -################################################ - -## To declare and build dynamic reconfigure parameters within this -## package, follow these steps: -## * In the file package.xml: -## * add a build_depend and a exec_depend tag for "dynamic_reconfigure" -## * In this file (CMakeLists.txt): -## * add "dynamic_reconfigure" to -## find_package(catkin REQUIRED COMPONENTS ...) 
-## * uncomment the "generate_dynamic_reconfigure_options" section below -## and list every .cfg file to be processed - -## Generate dynamic reconfigure parameters in the 'cfg' folder -# generate_dynamic_reconfigure_options( -# cfg/DynReconf1.cfg -# cfg/DynReconf2.cfg -# ) - -################################### -## catkin specific configuration ## -################################### -## The catkin_package macro generates cmake config files for your package -## Declare things to be passed to dependent projects -## INCLUDE_DIRS: uncomment this if your package contains header files -## LIBRARIES: libraries you create in this project that dependent projects also need -## CATKIN_DEPENDS: catkin_packages dependent projects also need -## DEPENDS: system dependencies of this project that dependent projects also need -catkin_package( -# INCLUDE_DIRS include -# LIBRARIES lasr_interaction_server -# CATKIN_DEPENDS other_catkin_pkg -# DEPENDS system_lib -) - -########### -## Build ## -########### - -## Specify additional locations of header files -## Your package locations should be listed before other locations -include_directories( -# include -# ${catkin_INCLUDE_DIRS} -) - -## Declare a C++ library -# add_library(${PROJECT_NAME} -# src/${PROJECT_NAME}/lasr_interaction_server.cpp -# ) - -## Add cmake target dependencies of the library -## as an example, code may need to be generated before libraries -## either from message generation or dynamic reconfigure -# add_dependencies(${PROJECT_NAME} ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS}) - -## Declare a C++ executable -## With catkin_make all packages are built within a single CMake context -## The recommended prefix ensures that target names across packages don't collide -# add_executable(${PROJECT_NAME}_node src/lasr_interaction_server_node.cpp) - -## Rename C++ executable without prefix -## The above recommended prefix causes long target names, the following renames the -## target back to the shorter version for ease of user use -## e.g. "rosrun someones_pkg node" instead of "rosrun someones_pkg someones_pkg_node" -# set_target_properties(${PROJECT_NAME}_node PROPERTIES OUTPUT_NAME node PREFIX "") - -## Add cmake target dependencies of the executable -## same as for the library above -# add_dependencies(${PROJECT_NAME}_node ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS}) - -## Specify libraries to link a library or executable target against -# target_link_libraries(${PROJECT_NAME}_node -# ${catkin_LIBRARIES} -# ) - -############# -## Install ## -############# - -# all install targets should use catkin DESTINATION variables -# See http://ros.org/doc/api/catkin/html/adv_user_guide/variables.html - -## Mark executable scripts (Python etc.) 
for installation -## in contrast to setup.py, you can choose the destination -# catkin_install_python(PROGRAMS -# scripts/my_python_script -# DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} -# ) - -## Mark executables for installation -## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_executables.html -# install(TARGETS ${PROJECT_NAME}_node -# RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} -# ) - -## Mark libraries for installation -## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_libraries.html -# install(TARGETS ${PROJECT_NAME} -# ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} -# LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} -# RUNTIME DESTINATION ${CATKIN_GLOBAL_BIN_DESTINATION} -# ) - -## Mark cpp header files for installation -# install(DIRECTORY include/${PROJECT_NAME}/ -# DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION} -# FILES_MATCHING PATTERN "*.h" -# PATTERN ".svn" EXCLUDE -# ) - -## Mark other files for installation (e.g. launch and bag files, etc.) -# install(FILES -# # myfile1 -# # myfile2 -# DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION} -# ) - -############# -## Testing ## -############# - -## Add gtest based cpp test target and link libraries -# catkin_add_gtest(${PROJECT_NAME}-test test/test_lasr_interaction_server.cpp) -# if(TARGET ${PROJECT_NAME}-test) -# target_link_libraries(${PROJECT_NAME}-test ${PROJECT_NAME}) -# endif() - -## Add folders to be run by python nosetests -# catkin_add_nosetests(test) diff --git a/legacy/lasr_interaction_server/README.md b/legacy/lasr_interaction_server/README.md deleted file mode 100644 index e01c74a48..000000000 --- a/legacy/lasr_interaction_server/README.md +++ /dev/null @@ -1,78 +0,0 @@ -# lasr_interaction_server - -The lasr_interaction_server package - -This package is maintained by: -- [jared](mailto:jared@todo.todo) - -## Prerequisites - -This package depends on the following ROS packages: -- catkin (buildtool) - -Ask the package maintainer to write or create a blank `doc/PREREQUISITES.md` for their package! - -## Usage - -Ask the package maintainer to write a `doc/USAGE.md` for their package! - -## Example - -Ask the package maintainer to write a `doc/EXAMPLE.md` for their package! - -## Technical Overview - -Ask the package maintainer to write a `doc/TECHNICAL.md` for their package! - -## ROS Definitions - -### Launch Files - -#### `interaction_server` - -No description provided. - - -### Messages - -This package has no messages. - -### Services - -#### `TextInteraction` - -Request - -| Field | Type | Description | -|:-:|:-:|---| -| task | std_msgs/String | | -| action | std_msgs/String | | -| query_text | std_msgs/String | | - -Response - -| Field | Type | Description | -|:-:|:-:|---| -| result | std_msgs/String | | -| success | std_msgs/Bool | | - -#### `SpeechInteraction` - -Request - -| Field | Type | Description | -|:-:|:-:|---| -| task | std_msgs/String | | -| action | std_msgs/String | | - -Response - -| Field | Type | Description | -|:-:|:-:|---| -| result | std_msgs/String | | -| success | std_msgs/Bool | | - - -### Actions - -This package has no actions. 
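For reference, the two services documented above were thin wrappers around the Dialogflow services removed earlier in this diff. A minimal client sketch follows, assuming the server registered its services under `/interaction_server/...` — a loud assumption, since the deleted server source never actually calls `rospy.Service`, so no canonical names exist in this tree. The `"robocup_receptionist"` task key comes from the dialogflow server config shown earlier; the action name is hypothetical.

#!/usr/bin/env python3
# Sketch of a TextInteraction client for the service documented above.
# The service name below is an assumption: the deleted server never
# registers its services, so no canonical name exists in this tree.
import rospy
from std_msgs.msg import String
from lasr_interaction_server.srv import TextInteraction

rospy.init_node("text_interaction_client")
rospy.wait_for_service("/interaction_server/text_interaction")  # hypothetical name
text_interaction = rospy.ServiceProxy(
    "/interaction_server/text_interaction", TextInteraction
)
# Request fields are filled positionally: task, action, query_text.
resp = text_interaction(
    String("robocup_receptionist"),  # task key from the dialogflow config
    String("some_action"),           # hypothetical action name
    String("my name is Jared"),
)
if resp.success.data:
    rospy.loginfo("result: %s", resp.result.data)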
diff --git a/legacy/lasr_interaction_server/launch/interaction_server.launch b/legacy/lasr_interaction_server/launch/interaction_server.launch deleted file mode 100644 index 9ab0dbf99..000000000 --- a/legacy/lasr_interaction_server/launch/interaction_server.launch +++ /dev/null @@ -1,7 +0,0 @@ - - - - - - - \ No newline at end of file diff --git a/legacy/lasr_interaction_server/package.xml b/legacy/lasr_interaction_server/package.xml deleted file mode 100644 index f751e07b0..000000000 --- a/legacy/lasr_interaction_server/package.xml +++ /dev/null @@ -1,59 +0,0 @@ - - - lasr_interaction_server - 0.0.0 - The lasr_interaction_server package - - - - - jared - - - - - - TODO - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - catkin - - - - - - - - diff --git a/legacy/lasr_interaction_server/src/interaction_server.py b/legacy/lasr_interaction_server/src/interaction_server.py deleted file mode 100755 index 9acafe7d9..000000000 --- a/legacy/lasr_interaction_server/src/interaction_server.py +++ /dev/null @@ -1,42 +0,0 @@ -#!/usr/bin/env python3 -import rospy - -from std_msgs.msg import String -from lasr_interaction_server.srv import SpeechInteraction, TextInteraction -from lasr_dialogflow.srv import DialogflowAudio, DialogflowText -from pal_interaction_msgs.msg import TtsActionGoal - -class InteractionServer(): - - def __init__(self): - - self.dialogflow_process_audio = rospy.ServiceProxy("/dialogflow_speech/process_audio", DialogflowAudio) - self.dialogflow_process_text = rospy.ServiceProxy("/dialogflow_speech/process_text", DialogflowText) - - def speech_interaction(self, req): - - response = SpeechInteractionResponse() - - dialogflow_response = self.dialogflow_process_audio(req.task, req.action) - - response.result, response.success = dialogflow_response.result, dialogflow_response.success - - return response - - def text_interaction(self, req): - - response = SpeechInteractionResponse() - - dialogflow_response = self.dialogflow_process_text(req.task, req.action, req.query_text) - - response.result, response.success = dialogflow_response.result, dialogflow_response.success - - return response - - def speak(self, text): - pass - -if __name__ == "__main__": - rospy.init_node("lasr_interaction_server") - interaction_server = InteractionServer() - rospy.spin() \ No newline at end of file diff --git a/legacy/lasr_interaction_server/srv/SpeechInteraction.srv b/legacy/lasr_interaction_server/srv/SpeechInteraction.srv deleted file mode 100644 index bdca858cf..000000000 --- a/legacy/lasr_interaction_server/srv/SpeechInteraction.srv +++ /dev/null @@ -1,5 +0,0 @@ -std_msgs/String task -std_msgs/String action ---- -std_msgs/String result -std_msgs/Bool success \ No newline at end of file diff --git a/legacy/lasr_interaction_server/srv/TextInteraction.srv b/legacy/lasr_interaction_server/srv/TextInteraction.srv deleted file mode 100644 index 2b9497c61..000000000 --- a/legacy/lasr_interaction_server/srv/TextInteraction.srv +++ /dev/null @@ -1,6 +0,0 @@ -std_msgs/String task -std_msgs/String action -std_msgs/String query_text ---- -std_msgs/String result -std_msgs/Bool success \ No newline at end of file diff --git a/legacy/lasr_navigate_to_known_person/CMakeLists.txt b/legacy/lasr_navigate_to_known_person/CMakeLists.txt deleted file mode 100644 index 3cf9eac00..000000000 --- a/legacy/lasr_navigate_to_known_person/CMakeLists.txt +++ /dev/null @@ -1,204 +0,0 @@ -cmake_minimum_required(VERSION 3.0.2) -project(lasr_navigate_to_known_person) - -## Compile as C++11, supported in ROS Kinetic and 
newer -# add_compile_options(-std=c++11) - -## Find catkin macros and libraries -## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz) -## is used, also find other catkin packages -find_package(catkin REQUIRED COMPONENTS - rospy -) - -## System dependencies are found with CMake's conventions -# find_package(Boost REQUIRED COMPONENTS system) - - -## Uncomment this if the package has a setup.py. This macro ensures -## modules and global scripts declared therein get installed -## See http://ros.org/doc/api/catkin/html/user_guide/setup_dot_py.html -catkin_python_setup() - -################################################ -## Declare ROS messages, services and actions ## -################################################ - -## To declare and build messages, services or actions from within this -## package, follow these steps: -## * Let MSG_DEP_SET be the set of packages whose message types you use in -## your messages/services/actions (e.g. std_msgs, actionlib_msgs, ...). -## * In the file package.xml: -## * add a build_depend tag for "message_generation" -## * add a build_depend and a exec_depend tag for each package in MSG_DEP_SET -## * If MSG_DEP_SET isn't empty the following dependency has been pulled in -## but can be declared for certainty nonetheless: -## * add a exec_depend tag for "message_runtime" -## * In this file (CMakeLists.txt): -## * add "message_generation" and every package in MSG_DEP_SET to -## find_package(catkin REQUIRED COMPONENTS ...) -## * add "message_runtime" and every package in MSG_DEP_SET to -## catkin_package(CATKIN_DEPENDS ...) -## * uncomment the add_*_files sections below as needed -## and list every .msg/.srv/.action file to be processed -## * uncomment the generate_messages entry below -## * add every package in MSG_DEP_SET to generate_messages(DEPENDENCIES ...) - -## Generate messages in the 'msg' folder -# add_message_files( -# FILES -# Message1.msg -# Message2.msg -# ) - -## Generate services in the 'srv' folder -# add_service_files( -# FILES -# Service1.srv -# Service2.srv -# ) - -## Generate actions in the 'action' folder -# add_action_files( -# FILES -# Action1.action -# Action2.action -# ) - -## Generate added messages and services with any dependencies listed here -# generate_messages( -# DEPENDENCIES -# std_msgs # Or other packages containing msgs -# ) - -################################################ -## Declare ROS dynamic reconfigure parameters ## -################################################ - -## To declare and build dynamic reconfigure parameters within this -## package, follow these steps: -## * In the file package.xml: -## * add a build_depend and a exec_depend tag for "dynamic_reconfigure" -## * In this file (CMakeLists.txt): -## * add "dynamic_reconfigure" to -## find_package(catkin REQUIRED COMPONENTS ...) 
-## * uncomment the "generate_dynamic_reconfigure_options" section below -## and list every .cfg file to be processed - -## Generate dynamic reconfigure parameters in the 'cfg' folder -# generate_dynamic_reconfigure_options( -# cfg/DynReconf1.cfg -# cfg/DynReconf2.cfg -# ) - -################################### -## catkin specific configuration ## -################################### -## The catkin_package macro generates cmake config files for your package -## Declare things to be passed to dependent projects -## INCLUDE_DIRS: uncomment this if your package contains header files -## LIBRARIES: libraries you create in this project that dependent projects also need -## CATKIN_DEPENDS: catkin_packages dependent projects also need -## DEPENDS: system dependencies of this project that dependent projects also need -catkin_package( -# INCLUDE_DIRS include -# LIBRARIES lasr_navigate_to_known_person -# CATKIN_DEPENDS rospy -# DEPENDS system_lib -) - -########### -## Build ## -########### - -## Specify additional locations of header files -## Your package locations should be listed before other locations -include_directories( -# include - ${catkin_INCLUDE_DIRS} -) - -## Declare a C++ library -# add_library(${PROJECT_NAME} -# src/${PROJECT_NAME}/lasr_navigate_to_known_person.cpp -# ) - -## Add cmake target dependencies of the library -## as an example, code may need to be generated before libraries -## either from message generation or dynamic reconfigure -# add_dependencies(${PROJECT_NAME} ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS}) - -## Declare a C++ executable -## With catkin_make all packages are built within a single CMake context -## The recommended prefix ensures that target names across packages don't collide -# add_executable(${PROJECT_NAME}_node src/lasr_navigate_to_known_person_node.cpp) - -## Rename C++ executable without prefix -## The above recommended prefix causes long target names, the following renames the -## target back to the shorter version for ease of user use -## e.g. "rosrun someones_pkg node" instead of "rosrun someones_pkg someones_pkg_node" -# set_target_properties(${PROJECT_NAME}_node PROPERTIES OUTPUT_NAME node PREFIX "") - -## Add cmake target dependencies of the executable -## same as for the library above -# add_dependencies(${PROJECT_NAME}_node ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS}) - -## Specify libraries to link a library or executable target against -# target_link_libraries(${PROJECT_NAME}_node -# ${catkin_LIBRARIES} -# ) - -############# -## Install ## -############# - -# all install targets should use catkin DESTINATION variables -# See http://ros.org/doc/api/catkin/html/adv_user_guide/variables.html - -## Mark executable scripts (Python etc.) 
for installation -## in contrast to setup.py, you can choose the destination -# catkin_install_python(PROGRAMS -# scripts/my_python_script -# DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} -# ) - -## Mark executables for installation -## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_executables.html -# install(TARGETS ${PROJECT_NAME}_node -# RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} -# ) - -## Mark libraries for installation -## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_libraries.html -# install(TARGETS ${PROJECT_NAME} -# ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} -# LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} -# RUNTIME DESTINATION ${CATKIN_GLOBAL_BIN_DESTINATION} -# ) - -## Mark cpp header files for installation -# install(DIRECTORY include/${PROJECT_NAME}/ -# DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION} -# FILES_MATCHING PATTERN "*.h" -# PATTERN ".svn" EXCLUDE -# ) - -## Mark other files for installation (e.g. launch and bag files, etc.) -# install(FILES -# # myfile1 -# # myfile2 -# DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION} -# ) - -############# -## Testing ## -############# - -## Add gtest based cpp test target and link libraries -# catkin_add_gtest(${PROJECT_NAME}-test test/test_lasr_navigate_to_known_person.cpp) -# if(TARGET ${PROJECT_NAME}-test) -# target_link_libraries(${PROJECT_NAME}-test ${PROJECT_NAME}) -# endif() - -## Add folders to be run by python nosetests -# catkin_add_nosetests(test) diff --git a/legacy/lasr_navigate_to_known_person/README.md b/legacy/lasr_navigate_to_known_person/README.md deleted file mode 100644 index 00062d574..000000000 --- a/legacy/lasr_navigate_to_known_person/README.md +++ /dev/null @@ -1,48 +0,0 @@ -# lasr_navigate_to_known_person - -The lasr_navigate_to_known_person package - -This package is maintained by: -- [elisabeth](mailto:elisabeth@todo.todo) - -## Prerequisites - -This package depends on the following ROS packages: -- catkin (buildtool) -- rospy (build) -- rospy (exec) - -Ask the package maintainer to write or create a blank `doc/PREREQUISITES.md` for their package! - -## Usage - -Ask the package maintainer to write a `doc/USAGE.md` for their package! - -## Example - -Ask the package maintainer to write a `doc/EXAMPLE.md` for their package! - -## Technical Overview - -Ask the package maintainer to write a `doc/TECHNICAL.md` for their package! - -## ROS Definitions - -### Launch Files - -#### `navigate_to_known_person` - -No description provided. - - -### Messages - -This package has no messages. - -### Services - -This package has no services. - -### Actions - -This package has no actions. 
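The package deleted below is a two-state smach machine (Scan, then GoTo) that passes the detected person's pose between states through userdata. As a robot-free sketch of that pattern — stand-in states in place of the head/base controllers, a tuple in place of the Pose — the core structure is:

import smach

class Scan(smach.State):
    # Stand-in for the real Scan state: writes a fake person location.
    def __init__(self):
        smach.State.__init__(
            self, outcomes=["succeeded", "failed"], output_keys=["location"]
        )

    def execute(self, userdata):
        userdata.location = (1.0, 2.0)  # stand-in for a detected Pose
        return "succeeded"

class GoTo(smach.State):
    # Stand-in for the real GoTo state: reads the location key back out.
    def __init__(self):
        smach.State.__init__(
            self, outcomes=["succeeded", "failed"], input_keys=["location"]
        )

    def execute(self, userdata):
        print("navigating to", userdata.location)
        return "succeeded"

sm = smach.StateMachine(outcomes=["success", "failure"])
with sm:
    smach.StateMachine.add(
        "SCAN_ROOM", Scan(),
        transitions={"succeeded": "GO_TO_HOST", "failed": "failure"},
    )
    smach.StateMachine.add(
        "GO_TO_HOST", GoTo(),
        transitions={"succeeded": "success", "failed": "failure"},
    )
print(sm.execute())  # -> "success"

Note how `output_keys` on Scan and `input_keys` on GoTo correspond to the `remapping={'location': 'location', ...}` entries in the deleted source: userdata keys are the only channel between states.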
diff --git a/legacy/lasr_navigate_to_known_person/launch/navigate_to_known_person.launch b/legacy/lasr_navigate_to_known_person/launch/navigate_to_known_person.launch deleted file mode 100644 index a20c8b2b8..000000000 --- a/legacy/lasr_navigate_to_known_person/launch/navigate_to_known_person.launch +++ /dev/null @@ -1,5 +0,0 @@ - - - - - \ No newline at end of file diff --git a/legacy/lasr_navigate_to_known_person/package.xml b/legacy/lasr_navigate_to_known_person/package.xml deleted file mode 100644 index 3cd73d964..000000000 --- a/legacy/lasr_navigate_to_known_person/package.xml +++ /dev/null @@ -1,62 +0,0 @@ - - - lasr_navigate_to_known_person - 0.0.0 - The lasr_navigate_to_known_person package - - - - - elisabeth - - - - - - TODO - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - catkin - rospy - rospy - rospy - - - - - - - - diff --git a/legacy/lasr_navigate_to_known_person/setup.py b/legacy/lasr_navigate_to_known_person/setup.py deleted file mode 100644 index 24e6a3798..000000000 --- a/legacy/lasr_navigate_to_known_person/setup.py +++ /dev/null @@ -1,11 +0,0 @@ -#!/usr/bin/env python3 - -from distutils.core import setup -from catkin_pkg.python_setup import generate_distutils_setup - -setup_args = generate_distutils_setup( - packages=['lasr_navigate_to_known_person'], - package_dir={'': 'src'} -) - -setup(**setup_args) \ No newline at end of file diff --git a/legacy/lasr_navigate_to_known_person/src/lasr_navigate_to_known_person/navigate_to_known_person.py b/legacy/lasr_navigate_to_known_person/src/lasr_navigate_to_known_person/navigate_to_known_person.py deleted file mode 100755 index c37b2a21b..000000000 --- a/legacy/lasr_navigate_to_known_person/src/lasr_navigate_to_known_person/navigate_to_known_person.py +++ /dev/null @@ -1,52 +0,0 @@ -#!/usr/bin/env python3 -import rospy -import smach -from lasr_navigate_to_known_person.states import GoTo, Scan -from tiago_controllers.controllers import ReachToRadius, HeadController - - -class ScanAndGoTo(smach.StateMachine): - def __init__(self, base_controller: ReachToRadius, head_controller: HeadController): - smach.StateMachine.__init__(self, outcomes=['succeeded', 'failed']) - - self.base_controller = base_controller - self.head_controller = head_controller - self.userdata.prev = 'Scan' - - with self: - smach.StateMachine.add('SCAN_ROOM', Scan(self.head_controller), - transitions={ - 'succeeded': 'GO_TO_HOST', - 'failed': 'failed' - }, - remapping={ - 'location': 'location', - 'prev': 'prev' - }) - - smach.StateMachine.add('GO_TO_HOST', GoTo(self.base_controller), - transitions={ - 'succeeded': 'succeeded', - 'failed': 'failed' - }, - remapping={ - 'location': 'location', - 'prev': 'prev' - }) - - -if __name__ == '__main__': - rospy.init_node("navigate_to_person_node", anonymous=True) - base_controller = ReachToRadius() - head_controller = HeadController() - sm = smach.StateMachine(outcomes=['success', 'no_people']) - - with sm: - smach.StateMachine.add('SCAN_AND_GO_TO', - ScanAndGoTo(base_controller, head_controller), - transitions={'succeeded': 'success', - 'failed': 'SCAN_AND_GO_TO'}) - - outcome = sm.execute() - rospy.loginfo('I have completed execution with outcome: ') - rospy.loginfo(outcome) diff --git a/legacy/lasr_navigate_to_known_person/src/lasr_navigate_to_known_person/states/__init__.py b/legacy/lasr_navigate_to_known_person/src/lasr_navigate_to_known_person/states/__init__.py deleted file mode 100644 index a6752cb3c..000000000 --- 
a/legacy/lasr_navigate_to_known_person/src/lasr_navigate_to_known_person/states/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -from .go_to_state import GoTo -from .scan_state import Scan \ No newline at end of file diff --git a/legacy/lasr_navigate_to_known_person/src/lasr_navigate_to_known_person/states/go_to_state.py b/legacy/lasr_navigate_to_known_person/src/lasr_navigate_to_known_person/states/go_to_state.py deleted file mode 100644 index 3121966c2..000000000 --- a/legacy/lasr_navigate_to_known_person/src/lasr_navigate_to_known_person/states/go_to_state.py +++ /dev/null @@ -1,35 +0,0 @@ -#!/usr/bin/env python3 -import rospy -import smach -from tiago_controllers.utils import activate_robot_navigation -from tiago_controllers.controllers import ReachToRadius - -class GoTo(smach.State): - def __init__(self, base_controller: ReachToRadius): - smach.State.__init__(self, outcomes=['succeeded', 'failed'], input_keys=['location', 'prev'], - output_keys=['prev']) - self.base_controller = base_controller - - def execute(self, userdata): - # activate navigation on the robot - activate_robot_navigation(True) - self.goal = userdata.location - - # go to radius if previous state was scanning the room to detect the host - if userdata.prev == 'Scan': - rospy.sleep(2.) # give it a 2 seconds for obstacle aware nav - success = self.base_controller.sync_to_radius(self.goal.position.x, self.goal.position.y, radius=2.0, - tol=0.2) - - if success: - success = self.base_controller.sync_face_to(self.goal.position.x, self.goal.position.y) - else: - success = self.base_controller.sync_to_pose(self.goal) - - userdata.prev = 'GoTo' - if success: - rospy.loginfo(f"sending pose goal {self.goal.position} to base_controller...") - return 'succeeded' - else: - rospy.loginfo(f"base controller failed to go to position {self.goal.position}") - return 'failed' diff --git a/legacy/lasr_navigate_to_known_person/src/lasr_navigate_to_known_person/states/scan_state.py b/legacy/lasr_navigate_to_known_person/src/lasr_navigate_to_known_person/states/scan_state.py deleted file mode 100644 index c368dbd99..000000000 --- a/legacy/lasr_navigate_to_known_person/src/lasr_navigate_to_known_person/states/scan_state.py +++ /dev/null @@ -1,45 +0,0 @@ -#!/usr/bin/env python3 - -import rospy -import smach -from geometry_msgs.msg import Pose, Quaternion -from sensor_msgs.msg import PointCloud2 -from tiago_controllers.utils import activate_robot_navigation -from face_detection.srv import FaceDetectionPCL - - -class Scan(smach.State): - def __init__(self, head_controller): - smach.State.__init__(self, outcomes=['succeeded', 'failed'], input_keys=['prev'], - output_keys=['location', 'prev']) - self.head_controller = head_controller - rospy.wait_for_service("pcl_face_detection_server") - self.face_detection = rospy.ServiceProxy("pcl_face_detection_server", FaceDetectionPCL) - - def execute(self, userdata): - userdata.prev = 'Scan' - - def rotate_head(direction): - self.head_controller.sync_reach_to(direction, 0.0, velocities=[0.1, 0.]) - - # deactivate navigation on the robot to move the head around - activate_robot_navigation(False) - - # run detection at most 4 times - direction = 0.2 - for _ in range(4): - direction *= -1 - rotate_head(direction) - print(f'rotating head in direction {direction}') - pcl_msg = rospy.wait_for_message("/xtion/depth_registered/points", PointCloud2) - - resp = self.face_detection(pcl_msg) - print(resp, "my response") - if resp.detections: - for detection in resp.detections: - # print(detection.name) - # if detection.name 
== "female_doll": - print(detection.centroid.point) - userdata.location = Pose(detection.centroid.point, Quaternion(0, 0, 0, 1)) - return 'succeeded' - return 'failed' diff --git a/legacy/lasr_object_detection_yolo/CMakeLists.txt b/legacy/lasr_object_detection_yolo/CMakeLists.txt deleted file mode 100644 index 6abbae500..000000000 --- a/legacy/lasr_object_detection_yolo/CMakeLists.txt +++ /dev/null @@ -1,208 +0,0 @@ -cmake_minimum_required(VERSION 3.0.2) -project(lasr_object_detection_yolo) - -## Compile as C++11, supported in ROS Kinetic and newer -# add_compile_options(-std=c++11) - -## Find catkin macros and libraries -## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz) -## is used, also find other catkin packages -find_package(catkin REQUIRED COMPONENTS - rospy - std_msgs - sensor_msgs - message_runtime - message_generation - lasr_vision_msgs -) - -## System dependencies are found with CMake's conventions -# find_package(Boost REQUIRED COMPONENTS system) - -## Uncomment this if the package has a setup.py. This macro ensures -## modules and global scripts declared therein get installed -## See http://ros.org/doc/api/catkin/html/user_guide/setup_dot_py.html -catkin_python_setup() - -################################################ -## Declare ROS messages, services and actions ## -################################################ - -## To declare and build messages, services or actions from within this -## package, follow these steps: -## * Let MSG_DEP_SET be the set of packages whose message types you use in -## your messages/services/actions (e.g. std_msgs, actionlib_msgs, ...). -## * In the file package.xml: -## * add a build_depend tag for "message_generation" -## * add a build_depend and a exec_depend tag for each package in MSG_DEP_SET -## * If MSG_DEP_SET isn't empty the following dependency has been pulled in -## but can be declared for certainty nonetheless: -## * add a exec_depend tag for "message_runtime" -## * In this file (CMakeLists.txt): -## * add "message_generation" and every package in MSG_DEP_SET to -## find_package(catkin REQUIRED COMPONENTS ...) -## * add "message_runtime" and every package in MSG_DEP_SET to -## catkin_package(CATKIN_DEPENDS ...) -## * uncomment the add_*_files sections below as needed -## and list every .msg/.srv/.action file to be processed -## * uncomment the generate_messages entry below -## * add every package in MSG_DEP_SET to generate_messages(DEPENDENCIES ...) - -## Generate messages in the 'msg' folder -# add_message_files( -# FILES -# Detection.msg -# ) - -## Generate services in the 'srv' folder -add_service_files( - FILES - YoloDetection.srv -) - -## Generate actions in the 'action' folder -# add_action_files( -# FILES -# Action1.action -# Action2.action -# ) - -## Generate added messages and services with any dependencies listed here -generate_messages( - DEPENDENCIES - std_msgs - sensor_msgs - lasr_vision_msgs -) - -################################################ -## Declare ROS dynamic reconfigure parameters ## -################################################ - -## To declare and build dynamic reconfigure parameters within this -## package, follow these steps: -## * In the file package.xml: -## * add a build_depend and a exec_depend tag for "dynamic_reconfigure" -## * In this file (CMakeLists.txt): -## * add "dynamic_reconfigure" to -## find_package(catkin REQUIRED COMPONENTS ...) 
-## * uncomment the "generate_dynamic_reconfigure_options" section below -## and list every .cfg file to be processed - -## Generate dynamic reconfigure parameters in the 'cfg' folder -# generate_dynamic_reconfigure_options( -# cfg/DynReconf1.cfg -# cfg/DynReconf2.cfg -# ) - -################################### -## catkin specific configuration ## -################################### -## The catkin_package macro generates cmake config files for your package -## Declare things to be passed to dependent projects -## INCLUDE_DIRS: uncomment this if your package contains header files -## LIBRARIES: libraries you create in this project that dependent projects also need -## CATKIN_DEPENDS: catkin_packages dependent projects also need -## DEPENDS: system dependencies of this project that dependent projects also need -catkin_package( -# INCLUDE_DIRS include -# LIBRARIES lasr_object_detection_yolo -# CATKIN_DEPENDS rospy std_msgs -# DEPENDS system_lib -) - -########### -## Build ## -########### - -## Specify additional locations of header files -## Your package locations should be listed before other locations -include_directories( -# include - ${catkin_INCLUDE_DIRS} -) - -## Declare a C++ library -# add_library(${PROJECT_NAME} -# src/${PROJECT_NAME}/lasr_object_detection_yolo.cpp -# ) - -## Add cmake target dependencies of the library -## as an example, code may need to be generated before libraries -## either from message generation or dynamic reconfigure -# add_dependencies(${PROJECT_NAME} ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS}) - -## Declare a C++ executable -## With catkin_make all packages are built within a single CMake context -## The recommended prefix ensures that target names across packages don't collide -# add_executable(${PROJECT_NAME}_node src/lasr_object_detection_yolo_node.cpp) - -## Rename C++ executable without prefix -## The above recommended prefix causes long target names, the following renames the -## target back to the shorter version for ease of user use -## e.g. "rosrun someones_pkg node" instead of "rosrun someones_pkg someones_pkg_node" -# set_target_properties(${PROJECT_NAME}_node PROPERTIES OUTPUT_NAME node PREFIX "") - -## Add cmake target dependencies of the executable -## same as for the library above -# add_dependencies(${PROJECT_NAME}_node ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS}) - -## Specify libraries to link a library or executable target against -# target_link_libraries(${PROJECT_NAME}_node -# ${catkin_LIBRARIES} -# ) - -############# -## Install ## -############# - -# all install targets should use catkin DESTINATION variables -# See http://ros.org/doc/api/catkin/html/adv_user_guide/variables.html - -## Mark executable scripts (Python etc.) 
for installation -## in contrast to setup.py, you can choose the destination -# catkin_install_python(PROGRAMS -# scripts/my_python_script -# DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} -# ) - -## Mark executables for installation -## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_executables.html -# install(TARGETS ${PROJECT_NAME}_node -# RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} -# ) - -## Mark libraries for installation -## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_libraries.html -# install(TARGETS ${PROJECT_NAME} -# ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} -# LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} -# RUNTIME DESTINATION ${CATKIN_GLOBAL_BIN_DESTINATION} -# ) - -## Mark cpp header files for installation -# install(DIRECTORY include/${PROJECT_NAME}/ -# DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION} -# FILES_MATCHING PATTERN "*.h" -# PATTERN ".svn" EXCLUDE -# ) - -## Mark other files for installation (e.g. launch and bag files, etc.) -# install(FILES -# # myfile1 -# # myfile2 -# DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION} -# ) - -############# -## Testing ## -############# - -## Add gtest based cpp test target and link libraries -# catkin_add_gtest(${PROJECT_NAME}-test test/test_lasr_object_detection_yolo.cpp) -# if(TARGET ${PROJECT_NAME}-test) -# target_link_libraries(${PROJECT_NAME}-test ${PROJECT_NAME}) -# endif() - -## Add folders to be run by python nosetests -# catkin_add_nosetests(test) diff --git a/legacy/lasr_object_detection_yolo/README.md b/legacy/lasr_object_detection_yolo/README.md deleted file mode 100644 index 8420085ec..000000000 --- a/legacy/lasr_object_detection_yolo/README.md +++ /dev/null @@ -1,64 +0,0 @@ -# lasr_object_detection_yolo - -The lasr_object_detection_yolo package - -This package is maintained by: -- [jared](mailto:jared@todo.todo) - -## Prerequisites - -This package depends on the following ROS packages: -- catkin (buildtool) -- rospy (build) -- std_msgs (build) -- rospy (exec) -- std_msgs (exec) -- lasr_vision_msgs - -Ask the package maintainer to write or create a blank `doc/PREREQUISITES.md` for their package! - -## Usage - -Ask the package maintainer to write a `doc/USAGE.md` for their package! - -## Example - -Ask the package maintainer to write a `doc/EXAMPLE.md` for their package! - -## Technical Overview - -Ask the package maintainer to write a `doc/TECHNICAL.md` for their package! - -## ROS Definitions - -### Launch Files - -This package has no launch files. - -### Messages - -This package has no messages. - -### Services - -#### `YoloDetection` - -Request - -| Field | Type | Description | -|:-:|:-:|---| -| image_raw | sensor_msgs/Image | | -| dataset | string | | -| confidence | float32 | | -| nms | float32 | | - -Response - -| Field | Type | Description | -|:-:|:-:|---| -| detected_objects | lasr_vision_msgs/Detection[] | | - - -### Actions - -This package has no actions. 
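A sketch of how the YoloDetection service documented above would be called. The service and topic names here are assumptions (the package ships no launch file naming the server, though `/xtion/...` matches the TIAGo topics used elsewhere in this diff), and the `Detection` fields read below (`name`, `confidence`) are taken on trust from lasr_vision_msgs rather than confirmed by anything in this hunk.

#!/usr/bin/env python3
# Hypothetical YoloDetection client; service and topic names are assumptions.
import rospy
from sensor_msgs.msg import Image
from lasr_object_detection_yolo.srv import YoloDetection

rospy.init_node("yolo_detection_client")
rospy.wait_for_service("/yolo_detection")  # hypothetical service name
detect = rospy.ServiceProxy("/yolo_detection", YoloDetection)

# Grab a single RGB frame to run detection on.
img = rospy.wait_for_message("/xtion/rgb/image_raw", Image)

# Request fields positionally: image_raw, dataset, confidence, nms.
resp = detect(img, "coco", 0.5, 0.3)
for detection in resp.detected_objects:
    rospy.loginfo("%s (%.2f)", detection.name, detection.confidence)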
diff --git a/legacy/lasr_object_detection_yolo/package.xml b/legacy/lasr_object_detection_yolo/package.xml deleted file mode 100644 index b5b188445..000000000 --- a/legacy/lasr_object_detection_yolo/package.xml +++ /dev/null @@ -1,66 +0,0 @@ - - - lasr_object_detection_yolo - 0.0.0 - The lasr_object_detection_yolo package - - - - - jared - - - - - - TODO - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - catkin - rospy - std_msgs - rospy - std_msgs - rospy - std_msgs - lasr_vision_msgs - - - - - - - - \ No newline at end of file diff --git a/legacy/lasr_object_detection_yolo/setup.py b/legacy/lasr_object_detection_yolo/setup.py deleted file mode 100644 index 151d41685..000000000 --- a/legacy/lasr_object_detection_yolo/setup.py +++ /dev/null @@ -1,12 +0,0 @@ - -#!/usr/bin/env python3 - -from distutils.core import setup -from catkin_pkg.python_setup import generate_distutils_setup - -setup_args = generate_distutils_setup( - packages=['lasr_object_detection_yolo'], - package_dir={'': 'src'} -) - -setup(**setup_args) \ No newline at end of file diff --git a/legacy/lasr_object_detection_yolo/src/lasr_object_detection_yolo/darknet_pytorch/__init__.py b/legacy/lasr_object_detection_yolo/src/lasr_object_detection_yolo/darknet_pytorch/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/legacy/lasr_object_detection_yolo/src/lasr_object_detection_yolo/darknet_pytorch/config.py b/legacy/lasr_object_detection_yolo/src/lasr_object_detection_yolo/darknet_pytorch/config.py deleted file mode 100644 index 197160634..000000000 --- a/legacy/lasr_object_detection_yolo/src/lasr_object_detection_yolo/darknet_pytorch/config.py +++ /dev/null @@ -1,100 +0,0 @@ -import torch -from darknet_pytorch.torch_utils import convert2cpu - - -def parse_cfg(cfgfile): - blocks = [] - fp = open(cfgfile, 'r') - block = None - line = fp.readline() - while line != '': - line = line.rstrip() - if line == '' or line[0] == '#': - line = fp.readline() - continue - elif line[0] == '[': - if block: - blocks.append(block) - block = dict() - block['type'] = line.lstrip('[').rstrip(']') - # set default value - if block['type'] == 'convolutional': - block['batch_normalize'] = 0 - else: - key, value = line.split('=') - key = key.strip() - if key == 'type': - key = '_type' - value = value.strip() - block[key] = value - line = fp.readline() - - if block: - blocks.append(block) - fp.close() - return blocks - - -def load_conv(buf, start, conv_model): - num_w = conv_model.weight.numel() - num_b = conv_model.bias.numel() - conv_model.bias.data.copy_(torch.from_numpy(buf[start:start + num_b])); - start = start + num_b - conv_model.weight.data.copy_(torch.from_numpy(buf[start:start + num_w]).reshape(conv_model.weight.data.shape)); - start = start + num_w - return start - - -def save_conv(fp, conv_model): - if conv_model.bias.is_cuda: - convert2cpu(conv_model.bias.data).numpy().tofile(fp) - convert2cpu(conv_model.weight.data).numpy().tofile(fp) - else: - conv_model.bias.data.numpy().tofile(fp) - conv_model.weight.data.numpy().tofile(fp) - - -def load_conv_bn(buf, start, conv_model, bn_model): - num_w = conv_model.weight.numel() - num_b = bn_model.bias.numel() - bn_model.bias.data.copy_(torch.from_numpy(buf[start:start + num_b])); - start = start + num_b - bn_model.weight.data.copy_(torch.from_numpy(buf[start:start + num_b])); - start = start + num_b - bn_model.running_mean.copy_(torch.from_numpy(buf[start:start + num_b])); - start = start + num_b - bn_model.running_var.copy_(torch.from_numpy(buf[start:start + 
num_b])); - start = start + num_b - conv_model.weight.data.copy_(torch.from_numpy(buf[start:start + num_w]).reshape(conv_model.weight.data.shape)); - start = start + num_w - return start - - -def save_conv_bn(fp, conv_model, bn_model): - if bn_model.bias.is_cuda: - convert2cpu(bn_model.bias.data).numpy().tofile(fp) - convert2cpu(bn_model.weight.data).numpy().tofile(fp) - convert2cpu(bn_model.running_mean).numpy().tofile(fp) - convert2cpu(bn_model.running_var).numpy().tofile(fp) - convert2cpu(conv_model.weight.data).numpy().tofile(fp) - else: - bn_model.bias.data.numpy().tofile(fp) - bn_model.weight.data.numpy().tofile(fp) - bn_model.running_mean.numpy().tofile(fp) - bn_model.running_var.numpy().tofile(fp) - conv_model.weight.data.numpy().tofile(fp) - - -def load_fc(buf, start, fc_model): - num_w = fc_model.weight.numel() - num_b = fc_model.bias.numel() - fc_model.bias.data.copy_(torch.from_numpy(buf[start:start + num_b])); - start = start + num_b - fc_model.weight.data.copy_(torch.from_numpy(buf[start:start + num_w])); - start = start + num_w - return start - - -def save_fc(fp, fc_model): - fc_model.bias.data.numpy().tofile(fp) - fc_model.weight.data.numpy().tofile(fp) diff --git a/legacy/lasr_object_detection_yolo/src/lasr_object_detection_yolo/darknet_pytorch/darknet.py b/legacy/lasr_object_detection_yolo/src/lasr_object_detection_yolo/darknet_pytorch/darknet.py deleted file mode 100644 index d3cab61f6..000000000 --- a/legacy/lasr_object_detection_yolo/src/lasr_object_detection_yolo/darknet_pytorch/darknet.py +++ /dev/null @@ -1,480 +0,0 @@ -import torch.nn as nn -import torch.nn.functional as F -import numpy as np -from darknet_pytorch.region_loss import RegionLoss -from darknet_pytorch.yolo_layer import YoloLayer -from darknet_pytorch.config import * -from darknet_pytorch.torch_utils import * - - -class Mish(torch.nn.Module): - def __init__(self): - super().__init__() - - def forward(self, x): - x = x * (torch.tanh(torch.nn.functional.softplus(x))) - return x - - -class MaxPoolDark(nn.Module): - def __init__(self, size=2, stride=1): - super(MaxPoolDark, self).__init__() - self.size = size - self.stride = stride - - def forward(self, x): - ''' - darknet output_size = (input_size + p - k) / s +1 - p : padding = k - 1 - k : size - s : stride - torch output_size = (input_size + 2*p -k) / s +1 - p : padding = k//2 - ''' - p = self.size // 2 - if ((x.shape[2] - 1) // self.stride) != ((x.shape[2] + 2 * p - self.size) // self.stride): - padding1 = (self.size - 1) // 2 - padding2 = padding1 + 1 - else: - padding1 = (self.size - 1) // 2 - padding2 = padding1 - if ((x.shape[3] - 1) // self.stride) != ((x.shape[3] + 2 * p - self.size) // self.stride): - padding3 = (self.size - 1) // 2 - padding4 = padding3 + 1 - else: - padding3 = (self.size - 1) // 2 - padding4 = padding3 - x = F.max_pool2d(F.pad(x, (padding3, padding4, padding1, padding2), mode='replicate'), - self.size, stride=self.stride) - return x - - -class Upsample_expand(nn.Module): - def __init__(self, stride=2): - super(Upsample_expand, self).__init__() - self.stride = stride - - def forward(self, x): - assert (x.data.dim() == 4) - - x = x.view(x.size(0), x.size(1), x.size(2), 1, x.size(3), 1).\ - expand(x.size(0), x.size(1), x.size(2), self.stride, x.size(3), self.stride).contiguous().\ - view(x.size(0), x.size(1), x.size(2) * self.stride, x.size(3) * self.stride) - - return x - - -class Upsample_interpolate(nn.Module): - def __init__(self, stride): - super(Upsample_interpolate, self).__init__() - self.stride = stride - - def 
forward(self, x): - assert (x.data.dim() == 4) - - out = F.interpolate(x, size=(x.size(2) * self.stride, x.size(3) * self.stride), mode='nearest') - return out - - -class Reorg(nn.Module): - def __init__(self, stride=2): - super(Reorg, self).__init__() - self.stride = stride - - def forward(self, x): - stride = self.stride - assert (x.data.dim() == 4) - B = x.data.size(0) - C = x.data.size(1) - H = x.data.size(2) - W = x.data.size(3) - assert (H % stride == 0) - assert (W % stride == 0) - ws = stride - hs = stride - x = x.view(B, C, H / hs, hs, W / ws, ws).transpose(3, 4).contiguous() - x = x.view(B, C, H / hs * W / ws, hs * ws).transpose(2, 3).contiguous() - x = x.view(B, C, hs * ws, H / hs, W / ws).transpose(1, 2).contiguous() - x = x.view(B, hs * ws * C, H / hs, W / ws) - return x - - -class GlobalAvgPool2d(nn.Module): - def __init__(self): - super(GlobalAvgPool2d, self).__init__() - - def forward(self, x): - N = x.data.size(0) - C = x.data.size(1) - H = x.data.size(2) - W = x.data.size(3) - x = F.avg_pool2d(x, (H, W)) - x = x.view(N, C) - return x - - -# for route, shortcut and sam -class EmptyModule(nn.Module): - def __init__(self): - super(EmptyModule, self).__init__() - - def forward(self, x): - return x - - -# support route shortcut and reorg -class Darknet(nn.Module): - def __init__(self, cfgfile, inference=False): - super(Darknet, self).__init__() - self.inference = inference - self.training = not self.inference - - self.blocks = parse_cfg(cfgfile) - self.width = int(self.blocks[0]['width']) - self.height = int(self.blocks[0]['height']) - - self.models = self.create_network(self.blocks) # merge conv, bn,leaky - self.loss = self.models[len(self.models) - 1] - - if self.blocks[(len(self.blocks) - 1)]['type'] == 'region': - self.anchors = self.loss.anchors - self.num_anchors = self.loss.num_anchors - self.anchor_step = self.loss.anchor_step - self.num_classes = self.loss.num_classes - - self.header = torch.IntTensor([0, 0, 0, 0]) - self.seen = 0 - - def forward(self, x): - ind = -2 - self.loss = None - outputs = dict() - out_boxes = [] - for block in self.blocks: - ind = ind + 1 - # if ind > 0: - # return x - - if block['type'] == 'net': - continue - elif block['type'] in ['convolutional', 'maxpool', 'reorg', 'upsample', 'avgpool', 'softmax', 'connected']: - x = self.models[ind](x) - outputs[ind] = x - elif block['type'] == 'route': - layers = block['layers'].split(',') - layers = [int(i) if int(i) > 0 else int(i) + ind for i in layers] - if len(layers) == 1: - if 'groups' not in block.keys() or int(block['groups']) == 1: - x = outputs[layers[0]] - outputs[ind] = x - else: - groups = int(block['groups']) - group_id = int(block['group_id']) - _, b, _, _ = outputs[layers[0]].shape - x = outputs[layers[0]][:, b // groups * group_id:b // groups * (group_id + 1)] - outputs[ind] = x - elif len(layers) == 2: - x1 = outputs[layers[0]] - x2 = outputs[layers[1]] - x = torch.cat((x1, x2), 1) - outputs[ind] = x - elif len(layers) == 4: - x1 = outputs[layers[0]] - x2 = outputs[layers[1]] - x3 = outputs[layers[2]] - x4 = outputs[layers[3]] - x = torch.cat((x1, x2, x3, x4), 1) - outputs[ind] = x - else: - print("rounte number > 2 ,is {}".format(len(layers))) - - elif block['type'] == 'shortcut': - from_layer = int(block['from']) - activation = block['activation'] - from_layer = from_layer if from_layer > 0 else from_layer + ind - x1 = outputs[from_layer] - x2 = outputs[ind - 1] - x = x1 + x2 - if activation == 'leaky': - x = F.leaky_relu(x, 0.1, inplace=True) - elif activation == 'relu': - x = 
F.relu(x, inplace=True) - outputs[ind] = x - elif block['type'] == 'sam': - from_layer = int(block['from']) - from_layer = from_layer if from_layer > 0 else from_layer + ind - x1 = outputs[from_layer] - x2 = outputs[ind - 1] - x = x1 * x2 - outputs[ind] = x - elif block['type'] == 'region': - continue - if self.loss: - self.loss = self.loss + self.models[ind](x) - else: - self.loss = self.models[ind](x) - outputs[ind] = None - elif block['type'] == 'yolo': - # if self.training: - # pass - # else: - # boxes = self.models[ind](x) - # out_boxes.append(boxes) - boxes = self.models[ind](x) - out_boxes.append(boxes) - elif block['type'] == 'cost': - continue - else: - print('unknown type %s' % (block['type'])) - - if self.training: - return out_boxes - else: - return get_region_boxes(out_boxes) - - def print_network(self): - print_cfg(self.blocks) - - def create_network(self, blocks): - models = nn.ModuleList() - - prev_filters = 3 - out_filters = [] - prev_stride = 1 - out_strides = [] - conv_id = 0 - for block in blocks: - if block['type'] == 'net': - prev_filters = int(block['channels']) - continue - elif block['type'] == 'convolutional': - conv_id = conv_id + 1 - batch_normalize = int(block['batch_normalize']) - filters = int(block['filters']) - kernel_size = int(block['size']) - stride = int(block['stride']) - is_pad = int(block['pad']) - pad = (kernel_size - 1) // 2 if is_pad else 0 - activation = block['activation'] - model = nn.Sequential() - if batch_normalize: - model.add_module('conv{0}'.format(conv_id), - nn.Conv2d(prev_filters, filters, kernel_size, stride, pad, bias=False)) - model.add_module('bn{0}'.format(conv_id), nn.BatchNorm2d(filters)) - # model.add_module('bn{0}'.format(conv_id), BN2d(filters)) - else: - model.add_module('conv{0}'.format(conv_id), - nn.Conv2d(prev_filters, filters, kernel_size, stride, pad)) - if activation == 'leaky': - model.add_module('leaky{0}'.format(conv_id), nn.LeakyReLU(0.1, inplace=True)) - elif activation == 'relu': - model.add_module('relu{0}'.format(conv_id), nn.ReLU(inplace=True)) - elif activation == 'mish': - model.add_module('mish{0}'.format(conv_id), Mish()) - elif activation == 'linear': - model.add_module('linear{0}'.format(conv_id), nn.Identity()) - elif activation == 'logistic': - model.add_module('sigmoid{0}'.format(conv_id), nn.Sigmoid()) - else: - print("No convolutional activation named {}".format(activation)) - - prev_filters = filters - out_filters.append(prev_filters) - prev_stride = stride * prev_stride - out_strides.append(prev_stride) - models.append(model) - elif block['type'] == 'maxpool': - pool_size = int(block['size']) - stride = int(block['stride']) - if stride == 1 and pool_size % 2: - # You can use Maxpooldark instead, here is convenient to convert onnx. - # Example: [maxpool] size=3 stride=1 - model = nn.MaxPool2d(kernel_size=pool_size, stride=stride, padding=pool_size // 2) - elif stride == pool_size: - # You can use Maxpooldark instead, here is convenient to convert onnx. 
- # Example: [maxpool] size=2 stride=2 - model = nn.MaxPool2d(kernel_size=pool_size, stride=stride, padding=0) - else: - model = MaxPoolDark(pool_size, stride) - out_filters.append(prev_filters) - prev_stride = stride * prev_stride - out_strides.append(prev_stride) - models.append(model) - elif block['type'] == 'avgpool': - model = GlobalAvgPool2d() - out_filters.append(prev_filters) - models.append(model) - elif block['type'] == 'softmax': - model = nn.Softmax() - out_strides.append(prev_stride) - out_filters.append(prev_filters) - models.append(model) - elif block['type'] == 'cost': - if block['_type'] == 'sse': - model = nn.MSELoss(reduction='mean') - elif block['_type'] == 'L1': - model = nn.L1Loss(reduction='mean') - elif block['_type'] == 'smooth': - model = nn.SmoothL1Loss(reduction='mean') - out_filters.append(1) - out_strides.append(prev_stride) - models.append(model) - elif block['type'] == 'reorg': - stride = int(block['stride']) - prev_filters = stride * stride * prev_filters - out_filters.append(prev_filters) - prev_stride = prev_stride * stride - out_strides.append(prev_stride) - models.append(Reorg(stride)) - elif block['type'] == 'upsample': - stride = int(block['stride']) - out_filters.append(prev_filters) - prev_stride = prev_stride // stride - out_strides.append(prev_stride) - - models.append(Upsample_expand(stride)) - # models.append(Upsample_interpolate(stride)) - - elif block['type'] == 'route': - layers = block['layers'].split(',') - ind = len(models) - layers = [int(i) if int(i) > 0 else int(i) + ind for i in layers] - if len(layers) == 1: - if 'groups' not in block.keys() or int(block['groups']) == 1: - prev_filters = out_filters[layers[0]] - prev_stride = out_strides[layers[0]] - else: - prev_filters = out_filters[layers[0]] // int(block['groups']) - prev_stride = out_strides[layers[0]] // int(block['groups']) - elif len(layers) == 2: - assert (layers[0] == ind - 1 or layers[1] == ind - 1) - prev_filters = out_filters[layers[0]] + out_filters[layers[1]] - prev_stride = out_strides[layers[0]] - elif len(layers) == 4: - assert (layers[0] == ind - 1) - prev_filters = out_filters[layers[0]] + out_filters[layers[1]] + out_filters[layers[2]] + \ - out_filters[layers[3]] - prev_stride = out_strides[layers[0]] - else: - print("route error!!!") - - out_filters.append(prev_filters) - out_strides.append(prev_stride) - models.append(EmptyModule()) - elif block['type'] == 'shortcut': - ind = len(models) - prev_filters = out_filters[ind - 1] - out_filters.append(prev_filters) - prev_stride = out_strides[ind - 1] - out_strides.append(prev_stride) - models.append(EmptyModule()) - elif block['type'] == 'sam': - ind = len(models) - prev_filters = out_filters[ind - 1] - out_filters.append(prev_filters) - prev_stride = out_strides[ind - 1] - out_strides.append(prev_stride) - models.append(EmptyModule()) - elif block['type'] == 'connected': - filters = int(block['output']) - if block['activation'] == 'linear': - model = nn.Linear(prev_filters, filters) - elif block['activation'] == 'leaky': - model = nn.Sequential( - nn.Linear(prev_filters, filters), - nn.LeakyReLU(0.1, inplace=True)) - elif block['activation'] == 'relu': - model = nn.Sequential( - nn.Linear(prev_filters, filters), - nn.ReLU(inplace=True)) - prev_filters = filters - out_filters.append(prev_filters) - out_strides.append(prev_stride) - models.append(model) - elif block['type'] == 'region': - loss = RegionLoss() - anchors = block['anchors'].split(',') - loss.anchors = [float(i) for i in anchors] - loss.num_classes = 
int(block['classes']) - loss.num_anchors = int(block['num']) - loss.anchor_step = len(loss.anchors) // loss.num_anchors - loss.object_scale = float(block['object_scale']) - loss.noobject_scale = float(block['noobject_scale']) - loss.class_scale = float(block['class_scale']) - loss.coord_scale = float(block['coord_scale']) - out_filters.append(prev_filters) - out_strides.append(prev_stride) - models.append(loss) - elif block['type'] == 'yolo': - yolo_layer = YoloLayer() - anchors = block['anchors'].split(',') - anchor_mask = block['mask'].split(',') - yolo_layer.anchor_mask = [int(i) for i in anchor_mask] - yolo_layer.anchors = [float(i) for i in anchors] - yolo_layer.num_classes = int(block['classes']) - self.num_classes = yolo_layer.num_classes - yolo_layer.num_anchors = int(block['num']) - yolo_layer.anchor_step = len(yolo_layer.anchors) // yolo_layer.num_anchors - yolo_layer.stride = prev_stride - yolo_layer.scale_x_y = float(block['scale_x_y']) - out_filters.append(prev_filters) - out_strides.append(prev_stride) - models.append(yolo_layer) - else: - print('unknown type %s' % (block['type'])) - - return models - - def load_weights(self, weightfile): - fp = open(weightfile, 'rb') - header = np.fromfile(fp, count=5, dtype=np.int32) - self.header = torch.from_numpy(header) - self.seen = self.header[3] - buf = np.fromfile(fp, dtype=np.float32) - fp.close() - - start = 0 - ind = -2 - for block in self.blocks: - if start >= buf.size: - break - ind = ind + 1 - if block['type'] == 'net': - continue - elif block['type'] == 'convolutional': - model = self.models[ind] - batch_normalize = int(block['batch_normalize']) - if batch_normalize: - start = load_conv_bn(buf, start, model[0], model[1]) - else: - start = load_conv(buf, start, model[0]) - elif block['type'] == 'connected': - model = self.models[ind] - if block['activation'] != 'linear': - start = load_fc(buf, start, model[0]) - else: - start = load_fc(buf, start, model) - elif block['type'] == 'maxpool': - pass - elif block['type'] == 'reorg': - pass - elif block['type'] == 'upsample': - pass - elif block['type'] == 'route': - pass - elif block['type'] == 'shortcut': - pass - elif block['type'] == 'sam': - pass - elif block['type'] == 'region': - pass - elif block['type'] == 'yolo': - pass - elif block['type'] == 'avgpool': - pass - elif block['type'] == 'softmax': - pass - elif block['type'] == 'cost': - pass - else: - print('unknown type %s' % (block['type'])) \ No newline at end of file diff --git a/legacy/lasr_object_detection_yolo/src/lasr_object_detection_yolo/darknet_pytorch/region_loss.py b/legacy/lasr_object_detection_yolo/src/lasr_object_detection_yolo/darknet_pytorch/region_loss.py deleted file mode 100644 index 75d413376..000000000 --- a/legacy/lasr_object_detection_yolo/src/lasr_object_detection_yolo/darknet_pytorch/region_loss.py +++ /dev/null @@ -1,195 +0,0 @@ -import torch.nn as nn -import torch.nn.functional as F -from darknet_pytorch.torch_utils import * - - -def build_targets(pred_boxes, target, anchors, num_anchors, num_classes, nH, nW, noobject_scale, object_scale, - sil_thresh, seen): - nB = target.size(0) - nA = num_anchors - nC = num_classes - anchor_step = len(anchors) / num_anchors - conf_mask = torch.ones(nB, nA, nH, nW) * noobject_scale - coord_mask = torch.zeros(nB, nA, nH, nW) - cls_mask = torch.zeros(nB, nA, nH, nW) - tx = torch.zeros(nB, nA, nH, nW) - ty = torch.zeros(nB, nA, nH, nW) - tw = torch.zeros(nB, nA, nH, nW) - th = torch.zeros(nB, nA, nH, nW) - tconf = torch.zeros(nB, nA, nH, nW) - tcls = 
torch.zeros(nB, nA, nH, nW) - - nAnchors = nA * nH * nW - nPixels = nH * nW - for b in range(nB): - cur_pred_boxes = pred_boxes[b * nAnchors:(b + 1) * nAnchors].t() - cur_ious = torch.zeros(nAnchors) - for t in range(50): - if target[b][t * 5 + 1] == 0: - break - gx = target[b][t * 5 + 1] * nW - gy = target[b][t * 5 + 2] * nH - gw = target[b][t * 5 + 3] * nW - gh = target[b][t * 5 + 4] * nH - cur_gt_boxes = torch.FloatTensor([gx, gy, gw, gh]).repeat(nAnchors, 1).t() - cur_ious = torch.max(cur_ious, bbox_ious(cur_pred_boxes, cur_gt_boxes, x1y1x2y2=False)) - conf_mask[b][cur_ious > sil_thresh] = 0 - if seen < 12800: - if anchor_step == 4: - tx = torch.FloatTensor(anchors).view(nA, anchor_step).index_select(1, torch.LongTensor([2])).view(1, nA, 1, - 1).repeat( - nB, 1, nH, nW) - ty = torch.FloatTensor(anchors).view(num_anchors, anchor_step).index_select(1, torch.LongTensor([2])).view( - 1, nA, 1, 1).repeat(nB, 1, nH, nW) - else: - tx.fill_(0.5) - ty.fill_(0.5) - tw.zero_() - th.zero_() - coord_mask.fill_(1) - - nGT = 0 - nCorrect = 0 - for b in range(nB): - for t in range(50): - if target[b][t * 5 + 1] == 0: - break - nGT = nGT + 1 - best_iou = 0.0 - best_n = -1 - min_dist = 10000 - gx = target[b][t * 5 + 1] * nW - gy = target[b][t * 5 + 2] * nH - gi = int(gx) - gj = int(gy) - gw = target[b][t * 5 + 3] * nW - gh = target[b][t * 5 + 4] * nH - gt_box = [0, 0, gw, gh] - for n in range(nA): - aw = anchors[anchor_step * n] - ah = anchors[anchor_step * n + 1] - anchor_box = [0, 0, aw, ah] - iou = bbox_iou(anchor_box, gt_box, x1y1x2y2=False) - if anchor_step == 4: - ax = anchors[anchor_step * n + 2] - ay = anchors[anchor_step * n + 3] - dist = pow(((gi + ax) - gx), 2) + pow(((gj + ay) - gy), 2) - if iou > best_iou: - best_iou = iou - best_n = n - elif anchor_step == 4 and iou == best_iou and dist < min_dist: - best_iou = iou - best_n = n - min_dist = dist - - gt_box = [gx, gy, gw, gh] - pred_box = pred_boxes[b * nAnchors + best_n * nPixels + gj * nW + gi] - - coord_mask[b][best_n][gj][gi] = 1 - cls_mask[b][best_n][gj][gi] = 1 - conf_mask[b][best_n][gj][gi] = object_scale - tx[b][best_n][gj][gi] = target[b][t * 5 + 1] * nW - gi - ty[b][best_n][gj][gi] = target[b][t * 5 + 2] * nH - gj - tw[b][best_n][gj][gi] = math.log(gw / anchors[anchor_step * best_n]) - th[b][best_n][gj][gi] = math.log(gh / anchors[anchor_step * best_n + 1]) - iou = bbox_iou(gt_box, pred_box, x1y1x2y2=False) # best_iou - tconf[b][best_n][gj][gi] = iou - tcls[b][best_n][gj][gi] = target[b][t * 5] - if iou > 0.5: - nCorrect = nCorrect + 1 - - return nGT, nCorrect, coord_mask, conf_mask, cls_mask, tx, ty, tw, th, tconf, tcls - - -class RegionLoss(nn.Module): - def __init__(self, num_classes=0, anchors=[], num_anchors=1): - super(RegionLoss, self).__init__() - self.num_classes = num_classes - self.anchors = anchors - self.num_anchors = num_anchors - self.anchor_step = len(anchors) / num_anchors - self.coord_scale = 1 - self.noobject_scale = 1 - self.object_scale = 5 - self.class_scale = 1 - self.thresh = 0.6 - self.seen = 0 - - def forward(self, output, target): - # output : BxAs*(4+1+num_classes)*H*W - t0 = time.time() - nB = output.data.size(0) - nA = self.num_anchors - nC = self.num_classes - nH = output.data.size(2) - nW = output.data.size(3) - - output = output.view(nB, nA, (5 + nC), nH, nW) - x = F.sigmoid(output.index_select(2, Variable(torch.cuda.LongTensor([0]))).view(nB, nA, nH, nW)) - y = F.sigmoid(output.index_select(2, Variable(torch.cuda.LongTensor([1]))).view(nB, nA, nH, nW)) - w = output.index_select(2, 
Variable(torch.cuda.LongTensor([2]))).view(nB, nA, nH, nW) - h = output.index_select(2, Variable(torch.cuda.LongTensor([3]))).view(nB, nA, nH, nW) - conf = F.sigmoid(output.index_select(2, Variable(torch.cuda.LongTensor([4]))).view(nB, nA, nH, nW)) - cls = output.index_select(2, Variable(torch.linspace(5, 5 + nC - 1, nC).long().cuda())) - cls = cls.view(nB * nA, nC, nH * nW).transpose(1, 2).contiguous().view(nB * nA * nH * nW, nC) - t1 = time.time() - - pred_boxes = torch.cuda.FloatTensor(4, nB * nA * nH * nW) - grid_x = torch.linspace(0, nW - 1, nW).repeat(nH, 1).repeat(nB * nA, 1, 1).view(nB * nA * nH * nW).cuda() - grid_y = torch.linspace(0, nH - 1, nH).repeat(nW, 1).t().repeat(nB * nA, 1, 1).view(nB * nA * nH * nW).cuda() - anchor_w = torch.Tensor(self.anchors).view(nA, self.anchor_step).index_select(1, torch.LongTensor([0])).cuda() - anchor_h = torch.Tensor(self.anchors).view(nA, self.anchor_step).index_select(1, torch.LongTensor([1])).cuda() - anchor_w = anchor_w.repeat(nB, 1).repeat(1, 1, nH * nW).view(nB * nA * nH * nW) - anchor_h = anchor_h.repeat(nB, 1).repeat(1, 1, nH * nW).view(nB * nA * nH * nW) - pred_boxes[0] = x.data + grid_x - pred_boxes[1] = y.data + grid_y - pred_boxes[2] = torch.exp(w.data) * anchor_w - pred_boxes[3] = torch.exp(h.data) * anchor_h - pred_boxes = convert2cpu(pred_boxes.transpose(0, 1).contiguous().view(-1, 4)) - t2 = time.time() - - nGT, nCorrect, coord_mask, conf_mask, cls_mask, tx, ty, tw, th, tconf, tcls = build_targets(pred_boxes, - target.data, - self.anchors, nA, - nC, \ - nH, nW, - self.noobject_scale, - self.object_scale, - self.thresh, - self.seen) - cls_mask = (cls_mask == 1) - nProposals = int((conf > 0.25).sum().data[0]) - - tx = Variable(tx.cuda()) - ty = Variable(ty.cuda()) - tw = Variable(tw.cuda()) - th = Variable(th.cuda()) - tconf = Variable(tconf.cuda()) - tcls = Variable(tcls.view(-1)[cls_mask].long().cuda()) - - coord_mask = Variable(coord_mask.cuda()) - conf_mask = Variable(conf_mask.cuda().sqrt()) - cls_mask = Variable(cls_mask.view(-1, 1).repeat(1, nC).cuda()) - cls = cls[cls_mask].view(-1, nC) - - t3 = time.time() - - loss_x = self.coord_scale * nn.MSELoss(reduction='sum')(x * coord_mask, tx * coord_mask) / 2.0 - loss_y = self.coord_scale * nn.MSELoss(reduction='sum')(y * coord_mask, ty * coord_mask) / 2.0 - loss_w = self.coord_scale * nn.MSELoss(reduction='sum')(w * coord_mask, tw * coord_mask) / 2.0 - loss_h = self.coord_scale * nn.MSELoss(reduction='sum')(h * coord_mask, th * coord_mask) / 2.0 - loss_conf = nn.MSELoss(reduction='sum')(conf * conf_mask, tconf * conf_mask) / 2.0 - loss_cls = self.class_scale * nn.CrossEntropyLoss(reduction='sum')(cls, tcls) - loss = loss_x + loss_y + loss_w + loss_h + loss_conf + loss_cls - t4 = time.time() - if False: - print('-----------------------------------') - print(' activation : %f' % (t1 - t0)) - print(' create pred_boxes : %f' % (t2 - t1)) - print(' build targets : %f' % (t3 - t2)) - print(' create loss : %f' % (t4 - t3)) - print(' total : %f' % (t4 - t0)) - print('%d: nGT %d, recall %d, proposals %d, loss: x %f, y %f, w %f, h %f, conf %f, cls %f, total %f' % ( - self.seen, nGT, nCorrect, nProposals, loss_x.data[0], loss_y.data[0], loss_w.data[0], loss_h.data[0], - loss_conf.data[0], loss_cls.data[0], loss.data[0])) - return loss diff --git a/legacy/lasr_object_detection_yolo/src/lasr_object_detection_yolo/darknet_pytorch/torch_utils.py b/legacy/lasr_object_detection_yolo/src/lasr_object_detection_yolo/darknet_pytorch/torch_utils.py deleted file mode 100644 index 8a9b499ad..000000000 
--- a/legacy/lasr_object_detection_yolo/src/lasr_object_detection_yolo/darknet_pytorch/torch_utils.py +++ /dev/null @@ -1,105 +0,0 @@ -import sys -import os -import time -import math -import torch -import numpy as np -from torch.autograd import Variable - -import itertools -import struct # get_image_size -import imghdr # get_image_size - -from darknet_pytorch import utils - - -def bbox_ious(boxes1, boxes2, x1y1x2y2=True): - if x1y1x2y2: - mx = torch.min(boxes1[0], boxes2[0]) - Mx = torch.max(boxes1[2], boxes2[2]) - my = torch.min(boxes1[1], boxes2[1]) - My = torch.max(boxes1[3], boxes2[3]) - w1 = boxes1[2] - boxes1[0] - h1 = boxes1[3] - boxes1[1] - w2 = boxes2[2] - boxes2[0] - h2 = boxes2[3] - boxes2[1] - else: - mx = torch.min(boxes1[0] - boxes1[2] / 2.0, boxes2[0] - boxes2[2] / 2.0) - Mx = torch.max(boxes1[0] + boxes1[2] / 2.0, boxes2[0] + boxes2[2] / 2.0) - my = torch.min(boxes1[1] - boxes1[3] / 2.0, boxes2[1] - boxes2[3] / 2.0) - My = torch.max(boxes1[1] + boxes1[3] / 2.0, boxes2[1] + boxes2[3] / 2.0) - w1 = boxes1[2] - h1 = boxes1[3] - w2 = boxes2[2] - h2 = boxes2[3] - uw = Mx - mx - uh = My - my - cw = w1 + w2 - uw - ch = h1 + h2 - uh - mask = ((cw <= 0) + (ch <= 0) > 0) - area1 = w1 * h1 - area2 = w2 * h2 - carea = cw * ch - carea[mask] = 0 - uarea = area1 + area2 - carea - return carea / uarea - - -def get_region_boxes(boxes_and_confs): - - # print('Getting boxes from boxes and confs ...') - - boxes_list = [] - confs_list = [] - - for item in boxes_and_confs: - boxes_list.append(item[0]) - confs_list.append(item[1]) - - # boxes: [batch, num1 + num2 + num3, 1, 4] - # confs: [batch, num1 + num2 + num3, num_classes] - boxes = torch.cat(boxes_list, dim=1) - confs = torch.cat(confs_list, dim=1) - - return [boxes, confs] - - -def convert2cpu(gpu_matrix): - return torch.FloatTensor(gpu_matrix.size()).copy_(gpu_matrix) - - -def convert2cpu_long(gpu_matrix): - return torch.LongTensor(gpu_matrix.size()).copy_(gpu_matrix) - - - -def do_detect(model, img, conf_thresh, nms_thresh, use_cuda=1): - model.eval() - with torch.no_grad(): - t0 = time.time() - - if type(img) == np.ndarray and len(img.shape) == 3: # cv2 image - img = torch.from_numpy(img.transpose(2, 0, 1)).float().div(255.0).unsqueeze(0) - elif type(img) == np.ndarray and len(img.shape) == 4: - img = torch.from_numpy(img.transpose(0, 3, 1, 2)).float().div(255.0) - else: - print("unknown image type") - exit(-1) - - if use_cuda: - img = img.cuda() - img = torch.autograd.Variable(img) - - t1 = time.time() - - output = model(img) - - t2 = time.time() - - print('-----------------------------------') - print(' Preprocess : %f' % (t1 - t0)) - print(' Model Inference : %f' % (t2 - t1)) - print('-----------------------------------') - - return utils.post_processing(img, conf_thresh, nms_thresh, output) - diff --git a/legacy/lasr_object_detection_yolo/src/lasr_object_detection_yolo/darknet_pytorch/utils.py b/legacy/lasr_object_detection_yolo/src/lasr_object_detection_yolo/darknet_pytorch/utils.py deleted file mode 100644 index a42e62642..000000000 --- a/legacy/lasr_object_detection_yolo/src/lasr_object_detection_yolo/darknet_pytorch/utils.py +++ /dev/null @@ -1,239 +0,0 @@ -import sys -import os -import time -import math -import numpy as np - -import itertools -import struct # get_image_size -import imghdr # get_image_size - - -def sigmoid(x): - return 1.0 / (np.exp(-x) + 1.)
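A note on the `softmax` helper that follows: it subtracts the per-row maximum before exponentiating, so `np.exp` never sees a large positive argument. A minimal standalone sketch of the same trick (names here are illustrative, not from the deleted file):

```python
import numpy as np

def stable_softmax(x):
    # Shifting by the row max leaves the result unchanged, because
    # exp(x - c) / sum(exp(x - c)) == exp(x) / sum(exp(x)) for any constant c,
    # but it guarantees the largest exponent is exp(0) = 1, so nothing overflows.
    shifted = x - np.expand_dims(np.max(x, axis=1), axis=1)
    e = np.exp(shifted)
    return e / np.expand_dims(e.sum(axis=1), axis=1)

logits = np.array([[1000.0, 1001.0], [0.0, 1.0]])
print(stable_softmax(logits))  # rows sum to 1, no overflow warnings
```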
- - -def softmax(x): - x = np.exp(x - np.expand_dims(np.max(x, axis=1), axis=1)) - x = x / np.expand_dims(x.sum(axis=1), axis=1) - return x - - -def bbox_iou(box1, box2, x1y1x2y2=True): - - # print('iou box1:', box1) - # print('iou box2:', box2) - - if x1y1x2y2: - mx = min(box1[0], box2[0]) - Mx = max(box1[2], box2[2]) - my = min(box1[1], box2[1]) - My = max(box1[3], box2[3]) - w1 = box1[2] - box1[0] - h1 = box1[3] - box1[1] - w2 = box2[2] - box2[0] - h2 = box2[3] - box2[1] - else: - w1 = box1[2] - h1 = box1[3] - w2 = box2[2] - h2 = box2[3] - - mx = min(box1[0], box2[0]) - Mx = max(box1[0] + w1, box2[0] + w2) - my = min(box1[1], box2[1]) - My = max(box1[1] + h1, box2[1] + h2) - uw = Mx - mx - uh = My - my - cw = w1 + w2 - uw - ch = h1 + h2 - uh - carea = 0 - if cw <= 0 or ch <= 0: - return 0.0 - - area1 = w1 * h1 - area2 = w2 * h2 - carea = cw * ch - uarea = area1 + area2 - carea - return carea / uarea - - -def nms_cpu(boxes, confs, nms_thresh=0.5, min_mode=False): - # print(boxes.shape) - x1 = boxes[:, 0] - y1 = boxes[:, 1] - x2 = boxes[:, 2] - y2 = boxes[:, 3] - - areas = (x2 - x1) * (y2 - y1) - order = confs.argsort()[::-1] - - keep = [] - while order.size > 0: - idx_self = order[0] - idx_other = order[1:] - - keep.append(idx_self) - - xx1 = np.maximum(x1[idx_self], x1[idx_other]) - yy1 = np.maximum(y1[idx_self], y1[idx_other]) - xx2 = np.minimum(x2[idx_self], x2[idx_other]) - yy2 = np.minimum(y2[idx_self], y2[idx_other]) - - w = np.maximum(0.0, xx2 - xx1) - h = np.maximum(0.0, yy2 - yy1) - inter = w * h - - if min_mode: - over = inter / np.minimum(areas[order[0]], areas[order[1:]]) - else: - over = inter / (areas[order[0]] + areas[order[1:]] - inter) - - inds = np.where(over <= nms_thresh)[0] - order = order[inds + 1] - - return np.array(keep) - - - -def plot_boxes_cv2(img, boxes, savename=None, class_names=None, color=None): - import cv2 - img = np.copy(img) - colors = np.array([[1, 0, 1], [0, 0, 1], [0, 1, 1], [0, 1, 0], [1, 1, 0], [1, 0, 0]], dtype=np.float32) - - def get_color(c, x, max_val): - ratio = float(x) / max_val * 5 - i = int(math.floor(ratio)) - j = int(math.ceil(ratio)) - ratio = ratio - i - r = (1 - ratio) * colors[i][c] + ratio * colors[j][c] - return int(r * 255) - - width = img.shape[1] - height = img.shape[0] - for i in range(len(boxes)): - box = boxes[i] - x1 = int(box[0] * width) - y1 = int(box[1] * height) - x2 = int(box[2] * width) - y2 = int(box[3] * height) - bbox_thick = int(0.6 * (height + width) / 600) - if color: - rgb = color - else: - rgb = (255, 0, 0) - if len(box) >= 7 and class_names: - cls_conf = box[5] - cls_id = box[6] - print('%s: %f' % (class_names[cls_id], cls_conf)) - classes = len(class_names) - offset = cls_id * 123457 % classes - red = get_color(2, offset, classes) - green = get_color(1, offset, classes) - blue = get_color(0, offset, classes) - if color is None: - rgb = (red, green, blue) - msg = str(class_names[cls_id])+" "+str(round(cls_conf,3)) - t_size = cv2.getTextSize(msg, 0, 0.7, thickness=bbox_thick // 2)[0] - c1, c2 = (x1,y1), (x2, y2) - c3 = (c1[0] + t_size[0], c1[1] - t_size[1] - 3) - cv2.rectangle(img, (x1,y1), (np.float32(c3[0]), np.float32(c3[1])), rgb, -1) - img = cv2.putText(img, msg, (c1[0], np.float32(c1[1] - 2)), cv2.FONT_HERSHEY_SIMPLEX,0.7, (0,0,0), bbox_thick//2,lineType=cv2.LINE_AA) - - img = cv2.rectangle(img, (x1, y1), (x2, y2), rgb, bbox_thick) - if savename: - print("save plot results to %s" % savename) - cv2.imwrite(savename, img) - return img - - -def read_truths(lab_path): - if not os.path.exists(lab_path): - 
return np.array([]) - if os.path.getsize(lab_path): - truths = np.loadtxt(lab_path) - truths = truths.reshape(truths.size / 5, 5) # to avoid single truth problem - return truths - else: - return np.array([]) - - -def load_class_names(namesfile): - class_names = [] - with open(namesfile, 'r') as fp: - lines = fp.readlines() - for line in lines: - line = line.rstrip() - class_names.append(line) - return class_names - - - -def post_processing(img, conf_thresh, nms_thresh, output): - - # anchors = [12, 16, 19, 36, 40, 28, 36, 75, 76, 55, 72, 146, 142, 110, 192, 243, 459, 401] - # num_anchors = 9 - # anchor_masks = [[0, 1, 2], [3, 4, 5], [6, 7, 8]] - # strides = [8, 16, 32] - # anchor_step = len(anchors) // num_anchors - - # [batch, num, 1, 4] - box_array = output[0] - # [batch, num, num_classes] - confs = output[1] - - t1 = time.time() - - if type(box_array).__name__ != 'ndarray': - box_array = box_array.cpu().detach().numpy() - confs = confs.cpu().detach().numpy() - - num_classes = confs.shape[2] - - # [batch, num, 4] - box_array = box_array[:, :, 0] - - # [batch, num, num_classes] --> [batch, num] - max_conf = np.max(confs, axis=2) - max_id = np.argmax(confs, axis=2) - - t2 = time.time() - - bboxes_batch = [] - for i in range(box_array.shape[0]): - - argwhere = max_conf[i] > conf_thresh - l_box_array = box_array[i, argwhere, :] - l_max_conf = max_conf[i, argwhere] - l_max_id = max_id[i, argwhere] - - bboxes = [] - # nms for each class - for j in range(num_classes): - - cls_argwhere = l_max_id == j - ll_box_array = l_box_array[cls_argwhere, :] - ll_max_conf = l_max_conf[cls_argwhere] - ll_max_id = l_max_id[cls_argwhere] - - keep = nms_cpu(ll_box_array, ll_max_conf, nms_thresh) - - if (keep.size > 0): - ll_box_array = ll_box_array[keep, :] - ll_max_conf = ll_max_conf[keep] - ll_max_id = ll_max_id[keep] - - for k in range(ll_box_array.shape[0]): - bboxes.append([ll_box_array[k, 0], ll_box_array[k, 1], ll_box_array[k, 2], ll_box_array[k, 3], ll_max_conf[k], ll_max_conf[k], ll_max_id[k]]) - - bboxes_batch.append(bboxes) - - t3 = time.time() - - print('-----------------------------------') - print(' max and argmax : %f' % (t2 - t1)) - print(' nms : %f' % (t3 - t2)) - print('Post processing total : %f' % (t3 - t1)) - print('-----------------------------------') - - return bboxes_batch diff --git a/legacy/lasr_object_detection_yolo/src/lasr_object_detection_yolo/darknet_pytorch/yolo_layer.py b/legacy/lasr_object_detection_yolo/src/lasr_object_detection_yolo/darknet_pytorch/yolo_layer.py deleted file mode 100644 index 1ea21566b..000000000 --- a/legacy/lasr_object_detection_yolo/src/lasr_object_detection_yolo/darknet_pytorch/yolo_layer.py +++ /dev/null @@ -1,322 +0,0 @@ -import torch.nn as nn -import torch.nn.functional as F -from darknet_pytorch.torch_utils import * - -def yolo_forward(output, conf_thresh, num_classes, anchors, num_anchors, scale_x_y, only_objectness=1, - validation=False): - # Output would be invalid if it does not satisfy this assert - # assert (output.size(1) == (5 + num_classes) * num_anchors) - - # print(output.size()) - - # Slice the second dimension (channel) of output into: - # [ 2, 2, 1, num_classes, 2, 2, 1, num_classes, 2, 2, 1, num_classes ] - # And then into - # bxy = [ 6 ] bwh = [ 6 ] det_conf = [ 3 ] cls_conf = [ num_classes * 3 ] - batch = output.size(0) - H = output.size(2) - W = output.size(3) - - bxy_list = [] - bwh_list = [] - det_confs_list = [] - cls_confs_list = [] - - for i in range(num_anchors): - begin = i * (5 + num_classes) - end = (i + 1) * (5 + 
num_classes) - - bxy_list.append(output[:, begin : begin + 2]) - bwh_list.append(output[:, begin + 2 : begin + 4]) - det_confs_list.append(output[:, begin + 4 : begin + 5]) - cls_confs_list.append(output[:, begin + 5 : end]) - - # Shape: [batch, num_anchors * 2, H, W] - bxy = torch.cat(bxy_list, dim=1) - # Shape: [batch, num_anchors * 2, H, W] - bwh = torch.cat(bwh_list, dim=1) - - # Shape: [batch, num_anchors, H, W] - det_confs = torch.cat(det_confs_list, dim=1) - # Shape: [batch, num_anchors * H * W] - det_confs = det_confs.view(batch, num_anchors * H * W) - - # Shape: [batch, num_anchors * num_classes, H, W] - cls_confs = torch.cat(cls_confs_list, dim=1) - # Shape: [batch, num_anchors, num_classes, H * W] - cls_confs = cls_confs.view(batch, num_anchors, num_classes, H * W) - # Shape: [batch, num_anchors, num_classes, H * W] --> [batch, num_anchors * H * W, num_classes] - cls_confs = cls_confs.permute(0, 1, 3, 2).reshape(batch, num_anchors * H * W, num_classes) - - # Apply sigmoid(), exp() and softmax() to slices - # - bxy = torch.sigmoid(bxy) * scale_x_y - 0.5 * (scale_x_y - 1) - bwh = torch.exp(bwh) - det_confs = torch.sigmoid(det_confs) - cls_confs = torch.sigmoid(cls_confs) - - # Prepare C-x, C-y, P-w, P-h (None of them are torch related) - grid_x = np.expand_dims(np.expand_dims(np.expand_dims(np.linspace(0, W - 1, W), axis=0).repeat(H, 0), axis=0), axis=0) - grid_y = np.expand_dims(np.expand_dims(np.expand_dims(np.linspace(0, H - 1, H), axis=1).repeat(W, 1), axis=0), axis=0) - # grid_x = torch.linspace(0, W - 1, W).reshape(1, 1, 1, W).repeat(1, 1, H, 1) - # grid_y = torch.linspace(0, H - 1, H).reshape(1, 1, H, 1).repeat(1, 1, 1, W) - - anchor_w = [] - anchor_h = [] - for i in range(num_anchors): - anchor_w.append(anchors[i * 2]) - anchor_h.append(anchors[i * 2 + 1]) - - device = None - cuda_check = output.is_cuda - if cuda_check: - device = output.get_device() - - bx_list = [] - by_list = [] - bw_list = [] - bh_list = [] - - # Apply C-x, C-y, P-w, P-h - for i in range(num_anchors): - ii = i * 2 - # Shape: [batch, 1, H, W] - bx = bxy[:, ii : ii + 1] + torch.tensor(grid_x, device=device, dtype=torch.float32) # grid_x.to(device=device, dtype=torch.float32) - # Shape: [batch, 1, H, W] - by = bxy[:, ii + 1 : ii + 2] + torch.tensor(grid_y, device=device, dtype=torch.float32) # grid_y.to(device=device, dtype=torch.float32) - # Shape: [batch, 1, H, W] - bw = bwh[:, ii : ii + 1] * anchor_w[i] - # Shape: [batch, 1, H, W] - bh = bwh[:, ii + 1 : ii + 2] * anchor_h[i] - - bx_list.append(bx) - by_list.append(by) - bw_list.append(bw) - bh_list.append(bh) - - - ######################################## - # Figure out bboxes from slices # - ######################################## - - # Shape: [batch, num_anchors, H, W] - bx = torch.cat(bx_list, dim=1) - # Shape: [batch, num_anchors, H, W] - by = torch.cat(by_list, dim=1) - # Shape: [batch, num_anchors, H, W] - bw = torch.cat(bw_list, dim=1) - # Shape: [batch, num_anchors, H, W] - bh = torch.cat(bh_list, dim=1) - - # Shape: [batch, 2 * num_anchors, H, W] - bx_bw = torch.cat((bx, bw), dim=1) - # Shape: [batch, 2 * num_anchors, H, W] - by_bh = torch.cat((by, bh), dim=1) - - # normalize coordinates to [0, 1] - bx_bw /= W - by_bh /= H - - # Shape: [batch, num_anchors * H * W, 1] - bx = bx_bw[:, :num_anchors].view(batch, num_anchors * H * W, 1) - by = by_bh[:, :num_anchors].view(batch, num_anchors * H * W, 1) - bw = bx_bw[:, num_anchors:].view(batch, num_anchors * H * W, 1) - bh = by_bh[:, num_anchors:].view(batch, num_anchors * H * W, 1) - - bx1 = bx - 
bw * 0.5 - by1 = by - bh * 0.5 - bx2 = bx1 + bw - by2 = by1 + bh - - # Shape: [batch, num_anchors * h * w, 4] -> [batch, num_anchors * h * w, 1, 4] - boxes = torch.cat((bx1, by1, bx2, by2), dim=2).view(batch, num_anchors * H * W, 1, 4) - # boxes = boxes.repeat(1, 1, num_classes, 1) - - # boxes: [batch, num_anchors * H * W, 1, 4] - # cls_confs: [batch, num_anchors * H * W, num_classes] - # det_confs: [batch, num_anchors * H * W] - - det_confs = det_confs.view(batch, num_anchors * H * W, 1) - confs = cls_confs * det_confs - - # boxes: [batch, num_anchors * H * W, 1, 4] - # confs: [batch, num_anchors * H * W, num_classes] - - return boxes, confs - - -def yolo_forward_dynamic(output, conf_thresh, num_classes, anchors, num_anchors, scale_x_y, only_objectness=1, - validation=False): - # Output would be invalid if it does not satisfy this assert - # assert (output.size(1) == (5 + num_classes) * num_anchors) - - # print(output.size()) - - # Slice the second dimension (channel) of output into: - # [ 2, 2, 1, num_classes, 2, 2, 1, num_classes, 2, 2, 1, num_classes ] - # And then into - # bxy = [ 6 ] bwh = [ 6 ] det_conf = [ 3 ] cls_conf = [ num_classes * 3 ] - # batch = output.size(0) - # H = output.size(2) - # W = output.size(3) - - bxy_list = [] - bwh_list = [] - det_confs_list = [] - cls_confs_list = [] - - for i in range(num_anchors): - begin = i * (5 + num_classes) - end = (i + 1) * (5 + num_classes) - - bxy_list.append(output[:, begin : begin + 2]) - bwh_list.append(output[:, begin + 2 : begin + 4]) - det_confs_list.append(output[:, begin + 4 : begin + 5]) - cls_confs_list.append(output[:, begin + 5 : end]) - - # Shape: [batch, num_anchors * 2, H, W] - bxy = torch.cat(bxy_list, dim=1) - # Shape: [batch, num_anchors * 2, H, W] - bwh = torch.cat(bwh_list, dim=1) - - # Shape: [batch, num_anchors, H, W] - det_confs = torch.cat(det_confs_list, dim=1) - # Shape: [batch, num_anchors * H * W] - det_confs = det_confs.view(output.size(0), num_anchors * output.size(2) * output.size(3)) - - # Shape: [batch, num_anchors * num_classes, H, W] - cls_confs = torch.cat(cls_confs_list, dim=1) - # Shape: [batch, num_anchors, num_classes, H * W] - cls_confs = cls_confs.view(output.size(0), num_anchors, num_classes, output.size(2) * output.size(3)) - # Shape: [batch, num_anchors, num_classes, H * W] --> [batch, num_anchors * H * W, num_classes] - cls_confs = cls_confs.permute(0, 1, 3, 2).reshape(output.size(0), num_anchors * output.size(2) * output.size(3), num_classes) - - # Apply sigmoid(), exp() and softmax() to slices - # - bxy = torch.sigmoid(bxy) * scale_x_y - 0.5 * (scale_x_y - 1) - bwh = torch.exp(bwh) - det_confs = torch.sigmoid(det_confs) - cls_confs = torch.sigmoid(cls_confs) - - # Prepare C-x, C-y, P-w, P-h (None of them are torch related) - grid_x = np.expand_dims(np.expand_dims(np.expand_dims(np.linspace(0, output.size(3) - 1, output.size(3)), axis=0).repeat(output.size(2), 0), axis=0), axis=0) - grid_y = np.expand_dims(np.expand_dims(np.expand_dims(np.linspace(0, output.size(2) - 1, output.size(2)), axis=1).repeat(output.size(3), 1), axis=0), axis=0) - # grid_x = torch.linspace(0, W - 1, W).reshape(1, 1, 1, W).repeat(1, 1, H, 1) - # grid_y = torch.linspace(0, H - 1, H).reshape(1, 1, H, 1).repeat(1, 1, 1, W) - - anchor_w = [] - anchor_h = [] - for i in range(num_anchors): - anchor_w.append(anchors[i * 2]) - anchor_h.append(anchors[i * 2 + 1]) - - device = None - cuda_check = output.is_cuda - if cuda_check: - device = output.get_device() - - bx_list = [] - by_list = [] - bw_list = [] - bh_list = [] - - 
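# The loop below performs the standard YOLO box decode: bxy already holds
# sigmoid(tx, ty) scaled by scale_x_y and bwh holds exp(tw, th), so each
# anchor's centre is offset by its cell indices grid_x / grid_y and its size
# is multiplied by the anchor width / height, before normalisation to [0, 1]
# by the feature-map width and height further down.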
# Apply C-x, C-y, P-w, P-h - for i in range(num_anchors): - ii = i * 2 - # Shape: [batch, 1, H, W] - bx = bxy[:, ii : ii + 1] + torch.tensor(grid_x, device=device, dtype=torch.float32) # grid_x.to(device=device, dtype=torch.float32) - # Shape: [batch, 1, H, W] - by = bxy[:, ii + 1 : ii + 2] + torch.tensor(grid_y, device=device, dtype=torch.float32) # grid_y.to(device=device, dtype=torch.float32) - # Shape: [batch, 1, H, W] - bw = bwh[:, ii : ii + 1] * anchor_w[i] - # Shape: [batch, 1, H, W] - bh = bwh[:, ii + 1 : ii + 2] * anchor_h[i] - - bx_list.append(bx) - by_list.append(by) - bw_list.append(bw) - bh_list.append(bh) - - - ######################################## - # Figure out bboxes from slices # - ######################################## - - # Shape: [batch, num_anchors, H, W] - bx = torch.cat(bx_list, dim=1) - # Shape: [batch, num_anchors, H, W] - by = torch.cat(by_list, dim=1) - # Shape: [batch, num_anchors, H, W] - bw = torch.cat(bw_list, dim=1) - # Shape: [batch, num_anchors, H, W] - bh = torch.cat(bh_list, dim=1) - - # Shape: [batch, 2 * num_anchors, H, W] - bx_bw = torch.cat((bx, bw), dim=1) - # Shape: [batch, 2 * num_anchors, H, W] - by_bh = torch.cat((by, bh), dim=1) - - # normalize coordinates to [0, 1] - bx_bw /= output.size(3) - by_bh /= output.size(2) - - # Shape: [batch, num_anchors * H * W, 1] - bx = bx_bw[:, :num_anchors].view(output.size(0), num_anchors * output.size(2) * output.size(3), 1) - by = by_bh[:, :num_anchors].view(output.size(0), num_anchors * output.size(2) * output.size(3), 1) - bw = bx_bw[:, num_anchors:].view(output.size(0), num_anchors * output.size(2) * output.size(3), 1) - bh = by_bh[:, num_anchors:].view(output.size(0), num_anchors * output.size(2) * output.size(3), 1) - - bx1 = bx - bw * 0.5 - by1 = by - bh * 0.5 - bx2 = bx1 + bw - by2 = by1 + bh - - # Shape: [batch, num_anchors * h * w, 4] -> [batch, num_anchors * h * w, 1, 4] - boxes = torch.cat((bx1, by1, bx2, by2), dim=2).view(output.size(0), num_anchors * output.size(2) * output.size(3), 1, 4) - # boxes = boxes.repeat(1, 1, num_classes, 1) - - # boxes: [batch, num_anchors * H * W, 1, 4] - # cls_confs: [batch, num_anchors * H * W, num_classes] - # det_confs: [batch, num_anchors * H * W] - - det_confs = det_confs.view(output.size(0), num_anchors * output.size(2) * output.size(3), 1) - confs = cls_confs * det_confs - - # boxes: [batch, num_anchors * H * W, 1, 4] - # confs: [batch, num_anchors * H * W, num_classes] - - return boxes, confs - -class YoloLayer(nn.Module): - ''' Yolo layer - model_out: while inference,is post-processing inside or outside the model - true:outside - ''' - def __init__(self, anchor_mask=[], num_classes=0, anchors=[], num_anchors=1, stride=32, model_out=False): - super(YoloLayer, self).__init__() - self.anchor_mask = anchor_mask - self.num_classes = num_classes - self.anchors = anchors - self.num_anchors = num_anchors - self.anchor_step = len(anchors) // num_anchors - self.coord_scale = 1 - self.noobject_scale = 1 - self.object_scale = 5 - self.class_scale = 1 - self.thresh = 0.6 - self.stride = stride - self.seen = 0 - self.scale_x_y = 1 - - self.model_out = model_out - - def forward(self, output, target=None): - if self.training: - return output - masked_anchors = [] - for m in self.anchor_mask: - masked_anchors += self.anchors[m * self.anchor_step:(m + 1) * self.anchor_step] - masked_anchors = [anchor / self.stride for anchor in masked_anchors] - - return yolo_forward_dynamic(output, self.thresh, self.num_classes, masked_anchors, 
len(self.anchor_mask),scale_x_y=self.scale_x_y) - diff --git a/legacy/lasr_object_detection_yolo/src/lasr_object_detection_yolo/detect_objects.py b/legacy/lasr_object_detection_yolo/src/lasr_object_detection_yolo/detect_objects.py deleted file mode 100644 index c799216d1..000000000 --- a/legacy/lasr_object_detection_yolo/src/lasr_object_detection_yolo/detect_objects.py +++ /dev/null @@ -1,46 +0,0 @@ -#!/usr/bin/env python3 - -import rospy -from lasr_vision_msgs.srv import YoloDetection, YoloDetectionRequest -from sensor_msgs.msg import Image - - -def detect_objects(object_names: [str], model="coco"): - """ - Detects all persons in an image - """ - rospy.wait_for_service('yolo_object_detection_server/detect_objects') - - if not isinstance(object_names, list): - raise ValueError("please input a list of strings") - - try: - detect_objects = rospy.ServiceProxy('yolo_object_detection_server/detect_objects', YoloDetection) - image_msg = rospy.wait_for_message('/xtion/rgb/image_raw', Image) # wait for rgb image - - req = YoloDetectionRequest() - req.image_raw = image_msg - req.dataset = model - req.confidence = 0.7 - req.nms = 0.3 - - yolo_resp = detect_objects(req) - except rospy.ServiceException as e: - print("Service call failed: %s" % e) - return [] - - objects = [] - for obj in yolo_resp.detected_objects: - if obj.name in object_names: - objects.append(obj) - return objects - - -if __name__ == '__main__': - rospy.init_node("objects_detection", anonymous=True) - objects = detect_objects(["person", "mug", "phone"]) - for o in objects: - print("object name: ", o.name) - print("object confidence: ", o.confidence) - print("object position: ", o.xywh) - diff --git a/legacy/lasr_object_detection_yolo/src/lasr_object_detection_yolo/detect_objects_v8.py b/legacy/lasr_object_detection_yolo/src/lasr_object_detection_yolo/detect_objects_v8.py deleted file mode 100755 index f71cf2a91..000000000 --- a/legacy/lasr_object_detection_yolo/src/lasr_object_detection_yolo/detect_objects_v8.py +++ /dev/null @@ -1,314 +0,0 @@ -#!/usr/bin/env python3 - -import rospy -from lasr_vision_msgs.srv import YoloDetection, YoloDetectionRequest -from sensor_msgs.msg import Image -from common_math import pcl_msg_to_cv2, seg_to_centroid -from std_msgs.msg import String -from geometry_msgs.msg import PointStamped, Point -from tf_module.srv import TfTransform, TfTransformRequest -import numpy as np -from lasr_shapely import LasrShapely -from cv_bridge3 import CvBridge -from sensor_msgs.msg import PointCloud2 -import cv2 -# from tiago_controllers.helpers.nav_map_helpers import is_close_to_object, rank - - - -def detect_objects(object_names: [str], confidence=0.25, nms=0.4, model="yolov8n.pt"): - """ - Detects all persons in an image using yolov8 - """ - rospy.wait_for_service("/yolov8/detect", rospy.Duration(15.0)) - - if not isinstance(object_names, list): - raise ValueError("please input a list of strings") - - try: - detect_service = rospy.ServiceProxy('/yolov8/detect', YoloDetection) - - if rospy.get_published_topics(namespace='/camera/image_raw'): - image_msg = rospy.wait_for_message('/camera/image_raw', Image) # wait for depth image - elif rospy.get_published_topics(namespace='/xtion/rgb/image_raw'): - image_msg = rospy.wait_for_message('/xtion/rgb/image_raw', Image) - elif rospy.get_published_topics(namespace='/usb_cam/image_raw'): - image_msg = rospy.wait_for_message('/usb_cam/image_raw', Image) # wait for rgb image - - - req = YoloDetectionRequest() - req.image_raw = image_msg - req.dataset = model - req.confidence = 
confidence - req.nms = nms - resp = detect_service(req) - print(resp) - - except rospy.ServiceException as e: - print("Service call failed: %s" % e) - return [] - - objects = [] - for obj in resp.detected_objects: - if obj.name in object_names: - objects.append(obj) - return objects - -def estimate_pose(tf, pcl_msg, detection): - centroid_xyz = seg_to_centroid(pcl_msg, np.array(detection.xyseg)) - centroid = PointStamped() - centroid.point = Point(*centroid_xyz) - centroid.header = pcl_msg.header - tf_req = TfTransformRequest() - tf_req.target_frame = String("map") - tf_req.point = centroid - response = tf(tf_req) - # print("response") - # print(response) - return np.array([response.target_point.point.x, response.target_point.point.y, response.target_point.point.z]) -# def perform_detection(yolo,tf, bridge, shapely, pcl_msg, polygon, filter, model="yolov8n-seg.pt"): -# cv_im = pcl_msg_to_cv2(pcl_msg) -# img_msg = bridge.cv2_to_imgmsg(cv_im) -# detections = yolo(img_msg, model, 0.5, 0.3) -# rospy.loginfo(detections) -# detections = [(det, estimate_pose(tf, pcl_msg, det)) for det in detections.detected_objects if -# det.name in filter] -# rospy.loginfo(f"All: {[(det.name, pose) for det, pose in detections]}") -# rospy.loginfo(f"Boundary: {polygon}") -# satisfied_points = shapely.are_points_in_polygon_2d(polygon, [[pose[0], pose[1]] for (_, pose) in -# detections]).inside -# detections = [detections[i] for i in range(0, len(detections)) if satisfied_points[i]] -# rospy.loginfo(f"Filtered: {[(det.name, pose) for det, pose in detections]}") -# print(len(detections)) -# return detections, img_msg - - -# for the comp -def perform_detection(default, pcl_msg, polygon, filter, model="yolov8n-seg.pt"): - cv_im = pcl_msg_to_cv2(pcl_msg) - img_msg = default.bridge.cv2_to_imgmsg(cv_im) - detections = default.yolo(img_msg, model, 0.5, 0.3) - # rospy.loginfo(detections) - detections = [(det, estimate_pose(default.tf, pcl_msg, det)) for det in detections.detected_objects if - det.name in filter] - - # rospy.loginfo(f"All: {[(det.name, pose) for det, pose in detections]}") - # rospy.loginfo(f"Boundary: {polygon}") - if polygon is None: - return detections, img_msg - else: - satisfied_points = default.shapely.are_points_in_polygon_2d(polygon, [[pose[0], pose[1]] for (_, pose) in - detections]).inside - detections = [detections[i] for i in range(0, len(detections)) if satisfied_points[i]] - # rospy.loginfo(f"Filtoprintered: {[(det.name, pose) for det, pose in detections]}") - print("len detections --->") - print(len(detections)) - return detections, img_msg - - -def debug(image, resp): - rospy.loginfo("Received image message") - try: - # pick the first detection as an example - if len(resp) > 0: - detection = resp[0][0] - - if len(detection.xyseg) > 0: - # unflatten the array - contours = np.array(detection.xyseg).reshape(-1, 2) - - # draw using opencv - img = np.zeros((image.height, image.width), dtype=np.uint8) - cv2.fillPoly(img, pts = [contours], color = (255,255,255)) - - # send to topic - # img_msg = bridge.cv2_to_imgmsg(img, encoding="passthrough") - # debug_publisher1.publish(img_msg) - debug_publisher1.publish(image) - else: - print('WARN: No segmentation was performed on the image!') - else: - print('WARN: no detections') - - # draw all of them - if len(resp) > 0: - img = np.zeros((image.height, image.width), dtype=np.uint8) - for detection in resp: - detection = detection[0] - if len(detection.xyseg) > 0: - contours = np.array(detection.xyseg).reshape(-1, 2) - r,g,b = np.random.randint(0, 255, 
size=3) - cv2.fillPoly(img, pts = [contours], color = (int(r), int(g), int(b))) - img_msg = bridge.cv2_to_imgmsg(img, encoding="passthrough") - debug_publisher2.publish(img_msg) - else: - print('WARN: no detections') - - except rospy.ServiceException as e: - rospy.logerr("Service call failed: %s" % e) - -def is_anyone_in_front_of_me(yolo,tf, bridge, shapely, pcl_msg, polygon, filter, model="yolov8n-seg.pt"): - pcl_msg = rospy.wait_for_message("/xtion/depth_registered/points", PointCloud2) - polygon = rospy.get_param('lift_position_points') - print(polygon) - detections, im = perform_detection(yolo, tf, bridge, shapely, pcl_msg, polygon, filter, model="yolov8n-seg.pt") - return len(detections) > 0 - -DIST_THRESH = 0.1 - -def euclidean_distance(point1, point2): - return np.sqrt((point1[0] - point2[0]) ** 2 + (point1[1] - point2[1]) ** 2 + (point1[2] - point2[2]) ** 2) -# def match_detections_between_frames(det1, det2): -# matches = {} -# -# for i, position1 in enumerate(det1): -# closest_distance = float('inf') -# closest_position = None -# -# for j, position2 in enumerate(det2): -# distance = euclidean_distance(position1, position2) -# if distance < closest_distance: -# closest_distance = distance -# closest_position = position2 -# -# if closest_position is not None: -# matches[i] = closest_position -# -# robot_position = [robot_x, robot_y, robot_z] -# -# for i, position2 in matches.values(): -# vector = np.array(position2) - np.array(det1[i]) -# -# vector_to_robot = np.array(robot_position) - np.array(det1[i]) -# dot_product = np.dot(vector, vector_to_robot) -# -# if dot_product > 0: -# print(f"Position {i + 1} in frame 1 faces the robot.") -# else: -# print(f"Position {i + 1} in frame 1 does not face the robot.") -# -# static_pos = [] -# moving_pos = [] -# -# for i, position2 in matches.items(): -# initial_position = det1[i] -# final_position = position2 -# -# initial_distance_to_robot = euclidean_distance(initial_position, robot_position) -# final_distance_to_robot = euclidean_distance(final_position, robot_position) -# -# if final_distance_to_robot < initial_distance_to_robot: -# # moved closer -# moving_pos.append(i) -# elif final_distance_to_robot > initial_distance_to_robot: -# #mode further -# pass -# else: -# # remained the same -# static_pos.append(i) -# -# # face the quat -# return static_pos, moving_pos - - -def phase1(yolo, tf, bridge, shapely, pcl_msg, polygon, filter, model): - # get two frames at the time - pcl_msg = rospy.wait_for_message("/xtion/depth_registered/points", PointCloud2) - polygon = rospy.get_param('test_lift_points') - detections1, im = perform_detection(yolo, tf, bridge, shapely, pcl_msg, polygon, filter, model) - rospy.sleep(5) - pcl_msg = rospy.wait_for_message("/xtion/depth_registered/points", PointCloud2) - detections2, im = perform_detection(yolo, tf, bridge, shapely, pcl_msg, polygon, filter, model) - while len(detections1) != len(detections2): - detections1, im = perform_detection(yolo, tf, bridge, shapely, pcl_msg, polygon, filter, model) - rospy.sleep(5) - pcl_msg = rospy.wait_for_message("/xtion/depth_registered/points", PointCloud2) - detections2, im = perform_detection(yolo, tf, bridge, shapely, pcl_msg, polygon, filter, model) - - # match the detections based on euclidean distance - matching = {} - not_matching = [] - for i, det1 in enumerate(detections1): - for j, det2 in enumerate(detections2): - if np.linalg.norm(det1[1] - det2[1]) < DIST_THRESH: - matching[i] = j - else: - not_matching.append(j) - - # print(not_matching) - - - - - # if the - 
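The matching step that `phase1` sketches above — pair detections across two frames when their centroids lie within `DIST_THRESH` — can be written as a small standalone helper. A minimal sketch under the same threshold convention (function and variable names here are illustrative, not from the original file):

```python
import numpy as np

DIST_THRESH = 0.1  # same threshold the deleted file uses

def match_detections(frame1_positions, frame2_positions, thresh=DIST_THRESH):
    """Greedy nearest-neighbour matching of 3D centroids between two frames.

    Returns a dict mapping indices in frame 1 to indices in frame 2, plus the
    frame-2 indices that were never claimed (new or re-positioned people).
    """
    matches = {}
    claimed = set()
    for i, p1 in enumerate(frame1_positions):
        best_j, best_d = None, thresh
        for j, p2 in enumerate(frame2_positions):
            if j in claimed:
                continue
            d = np.linalg.norm(np.asarray(p1) - np.asarray(p2))
            if d < best_d:
                best_j, best_d = j, d
        if best_j is not None:
            matches[i] = best_j
            claimed.add(best_j)
    unmatched = [j for j in range(len(frame2_positions)) if j not in claimed]
    return matches, unmatched

# Example: the first person barely moved, the second moved away, a third appeared.
f1 = [[1.0, 2.0, 0.0], [3.0, 1.0, 0.0]]
f2 = [[1.02, 2.01, 0.0], [3.5, 1.0, 0.0], [0.0, 0.0, 0.0]]
print(match_detections(f1, f2))  # ({0: 0}, [1, 2])
```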
- - - - -if __name__ == '__main__': - rospy.init_node("objects_detection_yolov8", anonymous=True) - - # object detection yolo v8 - # objects = detect_objects(["person", "mug", "phone"]) - # for o in objects: - # print("object name: ", o.name) - # print("object confidence: ", o.confidence) - # print("object position: ", o.xywh) - debug_publisher1 = rospy.Publisher('/yolov8/debug_mask', Image, queue_size=1) - debug_publisher2 = rospy.Publisher('/yolov8/debug_mask_all', Image, queue_size=1) - - # perform detection and segmentation - rospy.wait_for_service('/yolov8/detect') - yolo = rospy.ServiceProxy('/yolov8/detect', YoloDetection) - rospy.wait_for_service("/tf_transform") - tf = rospy.ServiceProxy("/tf_transform", TfTransform) - shapely = LasrShapely() - rospy.loginfo("Got shapely") - bridge = CvBridge() - rospy.loginfo("CV Bridge") - - # yolo seg only - # print("now calling perform detection") - # pcl_msg = rospy.wait_for_message("/xtion/depth_registered/points", PointCloud2) - # polygon = rospy.get_param('test_lift_points') - # detections, im = perform_detection(yolo, tf, bridge, shapely, pcl_msg, polygon, ["person"], "yolov8n-seg.pt") - # debug(im, detections) - # print("now printing detections") - # print(len(detections)) - # pos_people = [] - # for i, person in detections: - # print(person) - # pos_people.append([person[0], person[1]]) - # - # print(person[0], person[1]) - # - # print(pos_people) - # yolo seg only - - pcl_msg = rospy.wait_for_message("/xtion/depth_registered/points", PointCloud2) - polygon = rospy.get_param('test_lift_points') - detections, im = perform_detection(yolo, tf, bridge, shapely, pcl_msg, polygon, ["person"], "yolov8n-seg.pt") - - pos_people = [] - for i, person in detections: - # print(person) - person = person.tolist() - # print(type(person)) - pos_people.append([person[0], person[1]]) - - num_people = len(detections) - - rospy.set_param("/lift/num_people", num_people) - rospy.set_param("/lift/pos_persons", pos_people) - - pcl_msg = rospy.wait_for_message("/xtion/depth_registered/points", PointCloud2) - polygon = rospy.get_param('test_lift_points') - # print("is anyone in front of me >") - # print(is_anyone_in_front_of_me(yolo, tf, bridge, shapely, pcl_msg, polygon, ["person"], "yolov8n-seg.pt")) - # print("is anyone in front of me") - - # detections of i and then th esecond from the tuple - # print(detections[0][1]) - rospy.spin() - diff --git a/legacy/lasr_object_detection_yolo/src/lasr_object_detection_yolo/yolo_client.py b/legacy/lasr_object_detection_yolo/src/lasr_object_detection_yolo/yolo_client.py deleted file mode 100755 index 9a6161588..000000000 --- a/legacy/lasr_object_detection_yolo/src/lasr_object_detection_yolo/yolo_client.py +++ /dev/null @@ -1,28 +0,0 @@ -#!/usr/bin/env python3 - -import rospy - -from lasr_vision_msgs.srv import YoloDetection, YoloDetectionRequest -from sensor_msgs.msg import Image -from cv_bridge3 import CvBridge -import cv2 - -if __name__ == '__main__': - rospy.init_node("yolo_client", anonymous=True) - bridge = CvBridge() - while not rospy.is_shutdown(): - rospy.wait_for_service('yolo_object_detection_server/detect_objects') - im = rospy.wait_for_message('/usb_cam/image_raw', Image) - image = bridge.imgmsg_to_cv2(im, desired_encoding='passthrough') - cv2.imwrite('hello.jpg', image) - req = YoloDetectionRequest() - req.image_raw= im - req.dataset ='coco' - req.confidence = 0.5 - req.nms = 0.3 - print('hello') - server = rospy.ServiceProxy('yolo_object_detection_server/detect_objects', YoloDetection) - detections = 
server(req).detected_objects - print( - f"DETECTED:{detections}\n" - ) \ No newline at end of file diff --git a/legacy/lasr_object_detection_yolo/src/lasr_object_detection_yolo/yolo_object_detection_server.py b/legacy/lasr_object_detection_yolo/src/lasr_object_detection_yolo/yolo_object_detection_server.py deleted file mode 100755 index 648e6b35b..000000000 --- a/legacy/lasr_object_detection_yolo/src/lasr_object_detection_yolo/yolo_object_detection_server.py +++ /dev/null @@ -1,184 +0,0 @@ -#!/usr/bin/env python3 -import rospy -import rospkg - -import torch -import torchvision.transforms as transforms - -import cv2 -from PIL import Image as PIL_Image -import numpy as np - -import os -import time - -from darknet_pytorch.darknet import Darknet -from darknet_pytorch.utils import post_processing - -from lasr_vision_msgs.srv import YoloDetection, YoloDetectionResponse -from lasr_vision_msgs.msg import Detection - -import nvidia_smi - - -MODEL_ROOT = os.path.join(rospkg.RosPack().get_path('lasr_object_detection_yolo'), 'models') - - -def transform(): - return transforms.Compose([ - transforms.Resize((416, 416)), - transforms.ToTensor(), - ]) - -class YoloObjectDetectionServer(): - - def __init__(self): - - self.model_name = None - self.yolov4 = None - self.labels = [] - self.device = 'cpu' - - def load_model(self, model_name): - model_path = os.path.join(MODEL_ROOT, model_name) - - if os.path.isdir(model_path): - self.model_name = model_name - - start_time = time.time() - - try: - - with open(os.path.join(model_path, 'classes.txt')) as fp: - self.labels = fp.read().strip().splitlines() - - except FileNotFoundError: - rospy.logerr(f"Couldn't load {self.model_name}, 'classes.txt' does not exist in {model_path}.") - return False - - try: - - with open(os.path.join(model_path, 'yolov4.cfg')) as fp: - self.yolov4 = Darknet(os.path.join(model_path, 'yolov4.cfg')) - - except FileNotFoundError: - rospy.logerr(f"Couldn't load {self.model_name}, 'yolov4.cfg' does not exist in {model_path}.") - return False - - try: - self.yolov4.load_weights(os.path.join(model_path, 'yolov4.weights')) - - except FileNotFoundError: - rospy.logerr(f"Couldn't load {self.model_name}, 'yolov4.weights' does not exist in {model_path}.") - return False - - self.yolov4.eval() - """ - if torch.cuda.is_available(): - - # Initialise nvidia-smi - nvidia_smi.nvmlInit() - - # Assume a single GPU. - handle = nvidia_smi.nvmlDeviceGetHandleByIndex(0) - - # Get GPU memory info. - info = nvidia_smi.nvmlDeviceGetMemoryInfo(handle) - - if info.free / 1024**2 > 1024: - self.device = 'cuda' - - # Shutdown nvidia-smi - nvidia_smi.nvmlShutdown() - """ - self.device = "cpu" - - print(self.device) - self.yolov4.to(self.device) - - rospy.loginfo('Time to load {} model: {:.2f} seconds'.format(model_name, time.time() - start_time)) - - return True - - def detect(self, req): - - response = YoloDetectionResponse() - - # Only load model if it is not already loaded. - if not self.model_name == req.dataset: - if not self.load_model(req.dataset): - raise rospy.ServiceException(f"Couldn't load model '{req.dataset}'") - - # Random colours for bounding boxes. - np.random.seed(42) - COLOURS = np.random.randint(0, 255, size=(len(self.labels), 3), dtype="uint8") - - # Perform pre-processing. 
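# Pre-processing pipeline (for the lines below): the raw ROS image buffer is
# reinterpreted as an H x W x C uint8 array, converted to a PIL image, resized
# to the 416 x 416 network input and scaled to [0, 1] by transform(), then
# batched with torch.stack and moved to the selected device.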
- frame = np.frombuffer(req.image_raw.data, dtype=np.uint8).reshape(req.image_raw.height, req.image_raw.width, -1) - image = PIL_Image.fromarray(frame) - image = torch.stack([transform()(image)]).to(self.device) - - outputs = self.yolov4(image) - - # net forward and non-mean suppression - #try: - #except RuntimeError: - # if self.device != 'cpu': - # self.device = 'cpu' - #self.yolov4.to(self.device) - # return self.detect(req) - # else: - # raise rospy.ServiceException("Couldn't use CUDA or CPU....") - outputs = post_processing(image, req.confidence, req.nms, outputs) - - if not outputs[0] is None: - for detection in outputs[0]: - - # Class ID of detection. - idx = np.argmax(detection[5:]) - - # Convert bounding box to image co-ordinates. - bbox = detection[:4] - bbox[0] *= req.image_raw.width - bbox[1] *= req.image_raw.height - bbox[2] *= req.image_raw.width - bbox[3] *= req.image_raw.height - x1,y1,x2,y2 = [int(i) for i in bbox] - - - obj_conf, class_score = detection[4:6] - class_pred = int(detection[6]) - - # Draw and label bounding box of object in the frame. - name = self.labels[class_pred] - confidence = class_score - x, y, w, h = x1, y1, x2 - x1, y2 - y1 - color = [int(c) for c in COLOURS[idx]] - cv2.rectangle(frame, (x, y), (x + w, y + h), color, 2) - text = "{}: {:.4f}".format(name, confidence) - cv2.putText(frame, text, (x, y - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2) - - # Check detection is above the confidence threshold. - if class_score > req.confidence: - xywh = [x1, y1, x2 - x1, y2 - y1] - - # Append detection. - response.detected_objects.append( - Detection( - name=name, - confidence=class_score, - xywh=xywh - ) - ) - print(response.detected_objects, 'i am in yolo detect ') - return response - -if __name__ == '__main__': - rospy.init_node('yolo_object_detection_server') - server = YoloObjectDetectionServer() - serv = rospy.Service('yolo_object_detection_server/detect_objects', YoloDetection, server.detect) - rospy.loginfo('YOLOv4 object detection service initialised') - rospy.spin() - - - diff --git a/legacy/lasr_object_detection_yolo/srv/YoloDetection.srv b/legacy/lasr_object_detection_yolo/srv/YoloDetection.srv deleted file mode 100644 index f23dbf29f..000000000 --- a/legacy/lasr_object_detection_yolo/srv/YoloDetection.srv +++ /dev/null @@ -1,6 +0,0 @@ -sensor_msgs/Image image_raw -string dataset -float32 confidence -float32 nms ---- -lasr_vision_msgs/Detection[] detected_objects \ No newline at end of file diff --git a/legacy/lasr_object_detection_yolo/train/.template/yolov4-tiny.cfg b/legacy/lasr_object_detection_yolo/train/.template/yolov4-tiny.cfg deleted file mode 100644 index dc6f5bfb8..000000000 --- a/legacy/lasr_object_detection_yolo/train/.template/yolov4-tiny.cfg +++ /dev/null @@ -1,281 +0,0 @@ -[net] -# Testing -#batch=1 -#subdivisions=1 -# Training -batch=64 -subdivisions=1 -width=416 -height=416 -channels=3 -momentum=0.9 -decay=0.0005 -angle=0 -saturation = 1.5 -exposure = 1.5 -hue=.1 - -learning_rate=0.00261 -burn_in=1000 -max_batches = 500200 -policy=steps -steps=400000,450000 -scales=.1,.1 - -[convolutional] -batch_normalize=1 -filters=32 -size=3 -stride=2 -pad=1 -activation=leaky - -[convolutional] -batch_normalize=1 -filters=64 -size=3 -stride=2 -pad=1 -activation=leaky - -[convolutional] -batch_normalize=1 -filters=64 -size=3 -stride=1 -pad=1 -activation=leaky - -[route] -layers=-1 -groups=2 -group_id=1 - -[convolutional] -batch_normalize=1 -filters=32 -size=3 -stride=1 -pad=1 -activation=leaky - -[convolutional] -batch_normalize=1 -filters=32 
-size=3 -stride=1 -pad=1 -activation=leaky - -[route] -layers = -1,-2 - -[convolutional] -batch_normalize=1 -filters=64 -size=1 -stride=1 -pad=1 -activation=leaky - -[route] -layers = -6,-1 - -[maxpool] -size=2 -stride=2 - -[convolutional] -batch_normalize=1 -filters=128 -size=3 -stride=1 -pad=1 -activation=leaky - -[route] -layers=-1 -groups=2 -group_id=1 - -[convolutional] -batch_normalize=1 -filters=64 -size=3 -stride=1 -pad=1 -activation=leaky - -[convolutional] -batch_normalize=1 -filters=64 -size=3 -stride=1 -pad=1 -activation=leaky - -[route] -layers = -1,-2 - -[convolutional] -batch_normalize=1 -filters=128 -size=1 -stride=1 -pad=1 -activation=leaky - -[route] -layers = -6,-1 - -[maxpool] -size=2 -stride=2 - -[convolutional] -batch_normalize=1 -filters=256 -size=3 -stride=1 -pad=1 -activation=leaky - -[route] -layers=-1 -groups=2 -group_id=1 - -[convolutional] -batch_normalize=1 -filters=128 -size=3 -stride=1 -pad=1 -activation=leaky - -[convolutional] -batch_normalize=1 -filters=128 -size=3 -stride=1 -pad=1 -activation=leaky - -[route] -layers = -1,-2 - -[convolutional] -batch_normalize=1 -filters=256 -size=1 -stride=1 -pad=1 -activation=leaky - -[route] -layers = -6,-1 - -[maxpool] -size=2 -stride=2 - -[convolutional] -batch_normalize=1 -filters=512 -size=3 -stride=1 -pad=1 -activation=leaky - -################################## - -[convolutional] -batch_normalize=1 -filters=256 -size=1 -stride=1 -pad=1 -activation=leaky - -[convolutional] -batch_normalize=1 -filters=512 -size=3 -stride=1 -pad=1 -activation=leaky - -[convolutional] -size=1 -stride=1 -pad=1 -filters=255 -activation=linear - - - -[yolo] -mask = 3,4,5 -anchors = 10,14, 23,27, 37,58, 81,82, 135,169, 344,319 -classes=80 -num=6 -jitter=.3 -scale_x_y = 1.05 -cls_normalizer=1.0 -iou_normalizer=0.07 -iou_loss=ciou -ignore_thresh = .7 -truth_thresh = 1 -random=0 -resize=1.5 -nms_kind=greedynms -beta_nms=0.6 - -[route] -layers = -4 - -[convolutional] -batch_normalize=1 -filters=128 -size=1 -stride=1 -pad=1 -activation=leaky - -[upsample] -stride=2 - -[route] -layers = -1, 23 - -[convolutional] -batch_normalize=1 -filters=256 -size=3 -stride=1 -pad=1 -activation=leaky - -[convolutional] -size=1 -stride=1 -pad=1 -filters=255 -activation=linear - -[yolo] -mask = 1,2,3 -anchors = 10,14, 23,27, 37,58, 81,82, 135,169, 344,319 -classes=80 -num=6 -jitter=.3 -scale_x_y = 1.05 -cls_normalizer=1.0 -iou_normalizer=0.07 -iou_loss=ciou -ignore_thresh = .7 -truth_thresh = 1 -random=0 -resize=1.5 -nms_kind=greedynms -beta_nms=0.6 diff --git a/legacy/lasr_object_detection_yolo/train/.template/yolov4.cfg b/legacy/lasr_object_detection_yolo/train/.template/yolov4.cfg deleted file mode 100644 index f43259bf7..000000000 --- a/legacy/lasr_object_detection_yolo/train/.template/yolov4.cfg +++ /dev/null @@ -1,1157 +0,0 @@ -[net] -batch=64 -subdivisions=8 -# Training -#width=512 -#height=512 -width=608 -height=608 -channels=3 -momentum=0.949 -decay=0.0005 -angle=0 -saturation = 1.5 -exposure = 1.5 -hue=.1 - -learning_rate=0.0013 -burn_in=1000 -max_batches = 500500 -policy=steps -steps=400000,450000 -scales=.1,.1 - -#cutmix=1 -mosaic=0 - -#:104x104 54:52x52 85:26x26 104:13x13 for 416 - -[convolutional] -batch_normalize=1 -filters=32 -size=3 -stride=1 -pad=1 -activation=mish - -# Downsample - -[convolutional] -batch_normalize=1 -filters=64 -size=3 -stride=2 -pad=1 -activation=mish - -[convolutional] -batch_normalize=1 -filters=64 -size=1 -stride=1 -pad=1 -activation=mish - -[route] -layers = -2 - -[convolutional] -batch_normalize=1 -filters=64 -size=1 
-stride=1 -pad=1 -activation=mish - -[convolutional] -batch_normalize=1 -filters=32 -size=1 -stride=1 -pad=1 -activation=mish - -[convolutional] -batch_normalize=1 -filters=64 -size=3 -stride=1 -pad=1 -activation=mish - -[shortcut] -from=-3 -activation=linear - -[convolutional] -batch_normalize=1 -filters=64 -size=1 -stride=1 -pad=1 -activation=mish - -[route] -layers = -1,-7 - -[convolutional] -batch_normalize=1 -filters=64 -size=1 -stride=1 -pad=1 -activation=mish - -# Downsample - -[convolutional] -batch_normalize=1 -filters=128 -size=3 -stride=2 -pad=1 -activation=mish - -[convolutional] -batch_normalize=1 -filters=64 -size=1 -stride=1 -pad=1 -activation=mish - -[route] -layers = -2 - -[convolutional] -batch_normalize=1 -filters=64 -size=1 -stride=1 -pad=1 -activation=mish - -[convolutional] -batch_normalize=1 -filters=64 -size=1 -stride=1 -pad=1 -activation=mish - -[convolutional] -batch_normalize=1 -filters=64 -size=3 -stride=1 -pad=1 -activation=mish - -[shortcut] -from=-3 -activation=linear - -[convolutional] -batch_normalize=1 -filters=64 -size=1 -stride=1 -pad=1 -activation=mish - -[convolutional] -batch_normalize=1 -filters=64 -size=3 -stride=1 -pad=1 -activation=mish - -[shortcut] -from=-3 -activation=linear - -[convolutional] -batch_normalize=1 -filters=64 -size=1 -stride=1 -pad=1 -activation=mish - -[route] -layers = -1,-10 - -[convolutional] -batch_normalize=1 -filters=128 -size=1 -stride=1 -pad=1 -activation=mish - -# Downsample - -[convolutional] -batch_normalize=1 -filters=256 -size=3 -stride=2 -pad=1 -activation=mish - -[convolutional] -batch_normalize=1 -filters=128 -size=1 -stride=1 -pad=1 -activation=mish - -[route] -layers = -2 - -[convolutional] -batch_normalize=1 -filters=128 -size=1 -stride=1 -pad=1 -activation=mish - -[convolutional] -batch_normalize=1 -filters=128 -size=1 -stride=1 -pad=1 -activation=mish - -[convolutional] -batch_normalize=1 -filters=128 -size=3 -stride=1 -pad=1 -activation=mish - -[shortcut] -from=-3 -activation=linear - -[convolutional] -batch_normalize=1 -filters=128 -size=1 -stride=1 -pad=1 -activation=mish - -[convolutional] -batch_normalize=1 -filters=128 -size=3 -stride=1 -pad=1 -activation=mish - -[shortcut] -from=-3 -activation=linear - -[convolutional] -batch_normalize=1 -filters=128 -size=1 -stride=1 -pad=1 -activation=mish - -[convolutional] -batch_normalize=1 -filters=128 -size=3 -stride=1 -pad=1 -activation=mish - -[shortcut] -from=-3 -activation=linear - -[convolutional] -batch_normalize=1 -filters=128 -size=1 -stride=1 -pad=1 -activation=mish - -[convolutional] -batch_normalize=1 -filters=128 -size=3 -stride=1 -pad=1 -activation=mish - -[shortcut] -from=-3 -activation=linear - - -[convolutional] -batch_normalize=1 -filters=128 -size=1 -stride=1 -pad=1 -activation=mish - -[convolutional] -batch_normalize=1 -filters=128 -size=3 -stride=1 -pad=1 -activation=mish - -[shortcut] -from=-3 -activation=linear - -[convolutional] -batch_normalize=1 -filters=128 -size=1 -stride=1 -pad=1 -activation=mish - -[convolutional] -batch_normalize=1 -filters=128 -size=3 -stride=1 -pad=1 -activation=mish - -[shortcut] -from=-3 -activation=linear - -[convolutional] -batch_normalize=1 -filters=128 -size=1 -stride=1 -pad=1 -activation=mish - -[convolutional] -batch_normalize=1 -filters=128 -size=3 -stride=1 -pad=1 -activation=mish - -[shortcut] -from=-3 -activation=linear - -[convolutional] -batch_normalize=1 -filters=128 -size=1 -stride=1 -pad=1 -activation=mish - -[convolutional] -batch_normalize=1 -filters=128 -size=3 -stride=1 -pad=1 
-activation=mish - -[shortcut] -from=-3 -activation=linear - -[convolutional] -batch_normalize=1 -filters=128 -size=1 -stride=1 -pad=1 -activation=mish - -[route] -layers = -1,-28 - -[convolutional] -batch_normalize=1 -filters=256 -size=1 -stride=1 -pad=1 -activation=mish - -# Downsample - -[convolutional] -batch_normalize=1 -filters=512 -size=3 -stride=2 -pad=1 -activation=mish - -[convolutional] -batch_normalize=1 -filters=256 -size=1 -stride=1 -pad=1 -activation=mish - -[route] -layers = -2 - -[convolutional] -batch_normalize=1 -filters=256 -size=1 -stride=1 -pad=1 -activation=mish - -[convolutional] -batch_normalize=1 -filters=256 -size=1 -stride=1 -pad=1 -activation=mish - -[convolutional] -batch_normalize=1 -filters=256 -size=3 -stride=1 -pad=1 -activation=mish - -[shortcut] -from=-3 -activation=linear - - -[convolutional] -batch_normalize=1 -filters=256 -size=1 -stride=1 -pad=1 -activation=mish - -[convolutional] -batch_normalize=1 -filters=256 -size=3 -stride=1 -pad=1 -activation=mish - -[shortcut] -from=-3 -activation=linear - - -[convolutional] -batch_normalize=1 -filters=256 -size=1 -stride=1 -pad=1 -activation=mish - -[convolutional] -batch_normalize=1 -filters=256 -size=3 -stride=1 -pad=1 -activation=mish - -[shortcut] -from=-3 -activation=linear - - -[convolutional] -batch_normalize=1 -filters=256 -size=1 -stride=1 -pad=1 -activation=mish - -[convolutional] -batch_normalize=1 -filters=256 -size=3 -stride=1 -pad=1 -activation=mish - -[shortcut] -from=-3 -activation=linear - - -[convolutional] -batch_normalize=1 -filters=256 -size=1 -stride=1 -pad=1 -activation=mish - -[convolutional] -batch_normalize=1 -filters=256 -size=3 -stride=1 -pad=1 -activation=mish - -[shortcut] -from=-3 -activation=linear - - -[convolutional] -batch_normalize=1 -filters=256 -size=1 -stride=1 -pad=1 -activation=mish - -[convolutional] -batch_normalize=1 -filters=256 -size=3 -stride=1 -pad=1 -activation=mish - -[shortcut] -from=-3 -activation=linear - - -[convolutional] -batch_normalize=1 -filters=256 -size=1 -stride=1 -pad=1 -activation=mish - -[convolutional] -batch_normalize=1 -filters=256 -size=3 -stride=1 -pad=1 -activation=mish - -[shortcut] -from=-3 -activation=linear - -[convolutional] -batch_normalize=1 -filters=256 -size=1 -stride=1 -pad=1 -activation=mish - -[convolutional] -batch_normalize=1 -filters=256 -size=3 -stride=1 -pad=1 -activation=mish - -[shortcut] -from=-3 -activation=linear - -[convolutional] -batch_normalize=1 -filters=256 -size=1 -stride=1 -pad=1 -activation=mish - -[route] -layers = -1,-28 - -[convolutional] -batch_normalize=1 -filters=512 -size=1 -stride=1 -pad=1 -activation=mish - -# Downsample - -[convolutional] -batch_normalize=1 -filters=1024 -size=3 -stride=2 -pad=1 -activation=mish - -[convolutional] -batch_normalize=1 -filters=512 -size=1 -stride=1 -pad=1 -activation=mish - -[route] -layers = -2 - -[convolutional] -batch_normalize=1 -filters=512 -size=1 -stride=1 -pad=1 -activation=mish - -[convolutional] -batch_normalize=1 -filters=512 -size=1 -stride=1 -pad=1 -activation=mish - -[convolutional] -batch_normalize=1 -filters=512 -size=3 -stride=1 -pad=1 -activation=mish - -[shortcut] -from=-3 -activation=linear - -[convolutional] -batch_normalize=1 -filters=512 -size=1 -stride=1 -pad=1 -activation=mish - -[convolutional] -batch_normalize=1 -filters=512 -size=3 -stride=1 -pad=1 -activation=mish - -[shortcut] -from=-3 -activation=linear - -[convolutional] -batch_normalize=1 -filters=512 -size=1 -stride=1 -pad=1 -activation=mish - -[convolutional] -batch_normalize=1 
-filters=512 -size=3 -stride=1 -pad=1 -activation=mish - -[shortcut] -from=-3 -activation=linear - -[convolutional] -batch_normalize=1 -filters=512 -size=1 -stride=1 -pad=1 -activation=mish - -[convolutional] -batch_normalize=1 -filters=512 -size=3 -stride=1 -pad=1 -activation=mish - -[shortcut] -from=-3 -activation=linear - -[convolutional] -batch_normalize=1 -filters=512 -size=1 -stride=1 -pad=1 -activation=mish - -[route] -layers = -1,-16 - -[convolutional] -batch_normalize=1 -filters=1024 -size=1 -stride=1 -pad=1 -activation=mish - -########################## - -[convolutional] -batch_normalize=1 -filters=512 -size=1 -stride=1 -pad=1 -activation=leaky - -[convolutional] -batch_normalize=1 -size=3 -stride=1 -pad=1 -filters=1024 -activation=leaky - -[convolutional] -batch_normalize=1 -filters=512 -size=1 -stride=1 -pad=1 -activation=leaky - -### SPP ### -[maxpool] -stride=1 -size=5 - -[route] -layers=-2 - -[maxpool] -stride=1 -size=9 - -[route] -layers=-4 - -[maxpool] -stride=1 -size=13 - -[route] -layers=-1,-3,-5,-6 -### End SPP ### - -[convolutional] -batch_normalize=1 -filters=512 -size=1 -stride=1 -pad=1 -activation=leaky - -[convolutional] -batch_normalize=1 -size=3 -stride=1 -pad=1 -filters=1024 -activation=leaky - -[convolutional] -batch_normalize=1 -filters=512 -size=1 -stride=1 -pad=1 -activation=leaky - -[convolutional] -batch_normalize=1 -filters=256 -size=1 -stride=1 -pad=1 -activation=leaky - -[upsample] -stride=2 - -[route] -layers = 85 - -[convolutional] -batch_normalize=1 -filters=256 -size=1 -stride=1 -pad=1 -activation=leaky - -[route] -layers = -1, -3 - -[convolutional] -batch_normalize=1 -filters=256 -size=1 -stride=1 -pad=1 -activation=leaky - -[convolutional] -batch_normalize=1 -size=3 -stride=1 -pad=1 -filters=512 -activation=leaky - -[convolutional] -batch_normalize=1 -filters=256 -size=1 -stride=1 -pad=1 -activation=leaky - -[convolutional] -batch_normalize=1 -size=3 -stride=1 -pad=1 -filters=512 -activation=leaky - -[convolutional] -batch_normalize=1 -filters=256 -size=1 -stride=1 -pad=1 -activation=leaky - -[convolutional] -batch_normalize=1 -filters=128 -size=1 -stride=1 -pad=1 -activation=leaky - -[upsample] -stride=2 - -[route] -layers = 54 - -[convolutional] -batch_normalize=1 -filters=128 -size=1 -stride=1 -pad=1 -activation=leaky - -[route] -layers = -1, -3 - -[convolutional] -batch_normalize=1 -filters=128 -size=1 -stride=1 -pad=1 -activation=leaky - -[convolutional] -batch_normalize=1 -size=3 -stride=1 -pad=1 -filters=256 -activation=leaky - -[convolutional] -batch_normalize=1 -filters=128 -size=1 -stride=1 -pad=1 -activation=leaky - -[convolutional] -batch_normalize=1 -size=3 -stride=1 -pad=1 -filters=256 -activation=leaky - -[convolutional] -batch_normalize=1 -filters=128 -size=1 -stride=1 -pad=1 -activation=leaky - -########################## - -[convolutional] -batch_normalize=1 -size=3 -stride=1 -pad=1 -filters=256 -activation=leaky - -[convolutional] -size=1 -stride=1 -pad=1 -filters=255 -activation=linear - - -[yolo] -mask = 0,1,2 -anchors = 12, 16, 19, 36, 40, 28, 36, 75, 76, 55, 72, 146, 142, 110, 192, 243, 459, 401 -classes=80 -num=9 -jitter=.3 -ignore_thresh = .7 -truth_thresh = 1 -scale_x_y = 1.2 -iou_thresh=0.213 -cls_normalizer=1.0 -iou_normalizer=0.07 -iou_loss=ciou -nms_kind=greedynms -beta_nms=0.6 -max_delta=5 - - -[route] -layers = -4 - -[convolutional] -batch_normalize=1 -size=3 -stride=2 -pad=1 -filters=256 -activation=leaky - -[route] -layers = -1, -16 - -[convolutional] -batch_normalize=1 -filters=256 -size=1 -stride=1 -pad=1 
-activation=leaky - -[convolutional] -batch_normalize=1 -size=3 -stride=1 -pad=1 -filters=512 -activation=leaky - -[convolutional] -batch_normalize=1 -filters=256 -size=1 -stride=1 -pad=1 -activation=leaky - -[convolutional] -batch_normalize=1 -size=3 -stride=1 -pad=1 -filters=512 -activation=leaky - -[convolutional] -batch_normalize=1 -filters=256 -size=1 -stride=1 -pad=1 -activation=leaky - -[convolutional] -batch_normalize=1 -size=3 -stride=1 -pad=1 -filters=512 -activation=leaky - -[convolutional] -size=1 -stride=1 -pad=1 -filters=255 -activation=linear - - -[yolo] -mask = 3,4,5 -anchors = 12, 16, 19, 36, 40, 28, 36, 75, 76, 55, 72, 146, 142, 110, 192, 243, 459, 401 -classes=80 -num=9 -jitter=.3 -ignore_thresh = .7 -truth_thresh = 1 -scale_x_y = 1.1 -iou_thresh=0.213 -cls_normalizer=1.0 -iou_normalizer=0.07 -iou_loss=ciou -nms_kind=greedynms -beta_nms=0.6 -max_delta=5 - - -[route] -layers = -4 - -[convolutional] -batch_normalize=1 -size=3 -stride=2 -pad=1 -filters=512 -activation=leaky - -[route] -layers = -1, -37 - -[convolutional] -batch_normalize=1 -filters=512 -size=1 -stride=1 -pad=1 -activation=leaky - -[convolutional] -batch_normalize=1 -size=3 -stride=1 -pad=1 -filters=1024 -activation=leaky - -[convolutional] -batch_normalize=1 -filters=512 -size=1 -stride=1 -pad=1 -activation=leaky - -[convolutional] -batch_normalize=1 -size=3 -stride=1 -pad=1 -filters=1024 -activation=leaky - -[convolutional] -batch_normalize=1 -filters=512 -size=1 -stride=1 -pad=1 -activation=leaky - -[convolutional] -batch_normalize=1 -size=3 -stride=1 -pad=1 -filters=1024 -activation=leaky - -[convolutional] -size=1 -stride=1 -pad=1 -filters=255 -activation=linear - - -[yolo] -mask = 6,7,8 -anchors = 12, 16, 19, 36, 40, 28, 36, 75, 76, 55, 72, 146, 142, 110, 192, 243, 459, 401 -classes=80 -num=9 -jitter=.3 -ignore_thresh = .7 -truth_thresh = 1 -random=1 -scale_x_y = 1.05 -iou_thresh=0.213 -cls_normalizer=1.0 -iou_normalizer=0.07 -iou_loss=ciou -nms_kind=greedynms -beta_nms=0.6 -max_delta=5 \ No newline at end of file diff --git a/legacy/lasr_object_detection_yolo/train/Dockerfile b/legacy/lasr_object_detection_yolo/train/Dockerfile deleted file mode 100644 index a53eb4553..000000000 --- a/legacy/lasr_object_detection_yolo/train/Dockerfile +++ /dev/null @@ -1,14 +0,0 @@ -FROM python:3 as builder - -COPY . . -ENV PATH $PATH:/scripts - -ENV USE_GPU 0 -ENV USE_OPENCV 0 -ENV USE_OPENMP 0 -ENV USE_CUDNN 0 - -RUN mkdir datasets config weights -RUN build_darknet.sh - -ENTRYPOINT ["train.sh"] \ No newline at end of file diff --git a/legacy/lasr_object_detection_yolo/train/README.md b/legacy/lasr_object_detection_yolo/train/README.md deleted file mode 100644 index bcfca6a79..000000000 --- a/legacy/lasr_object_detection_yolo/train/README.md +++ /dev/null @@ -1,25 +0,0 @@ -## Train YOLOv4 - -For generating darknet config files and training YOLOv4. - -### Instructions - -`docker build -t darknet .` - -Mount your data to `/datasets/data`: - -`docker run -v data:/datasets/data darknet` - -To run with GPU / CUDNN / OPENCV / OPENMP: - -`docker run -e USE_GPU=1 -v data:/datasets/data darknet` - -### Details - -**build_darknet.sh** builds darknet from source; modify this to change compilation options. - -**train.sh** trains YOLOv4. Usage is `./train.sh DATASET`, where DATASET is a directory name in **datasets** (you must create this root directory). - -If you encounter issues with darknet, you may want to alter the **batch_size** and **subdivisions**. - -Run outside of the ROS environment.
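As context for the config generator deleted below: darknet's detection-head `filters` count and the training schedule are pure functions of the class count. A minimal sketch of the arithmetic the generator applies (standard darknet heuristics; the concrete class count is illustrative only):

```python
# Derive darknet config values from the number of classes (YOLOv4 heuristics).
classes = 3                                 # e.g. line count of datasets/<name>/classes.txt
filters = (classes + 5) * 3                 # 3 anchors x (x, y, w, h, objectness + classes)
max_batches = classes * 2000                # recommended minimum training length
steps = ((max_batches // 10) * 8, (max_batches // 10) * 9)  # LR decay at 80% and 90%
print(filters, max_batches, steps)          # -> 24 6000 (4800, 5400)
```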
\ No newline at end of file diff --git a/legacy/lasr_object_detection_yolo/train/scripts/build_darknet.sh b/legacy/lasr_object_detection_yolo/train/scripts/build_darknet.sh deleted file mode 100755 index 96694a266..000000000 --- a/legacy/lasr_object_detection_yolo/train/scripts/build_darknet.sh +++ /dev/null @@ -1,4 +0,0 @@ -#!/bin/bash -git clone https://github.com/AlexeyAB/darknet -make -C darknet GPU=$USE_GPU OPENCV=$USE_OPENCV CUDNN=$USE_CUDNN OPENMP=$USE_OPENMP -wget -P pretrained_weights https://github.com/AlexeyAB/darknet/releases/download/darknet_yolo_v3_optimal/yolov4.conv.137 \ No newline at end of file diff --git a/legacy/lasr_object_detection_yolo/train/scripts/generate_config_files.py b/legacy/lasr_object_detection_yolo/train/scripts/generate_config_files.py deleted file mode 100644 index 39304b059..000000000 --- a/legacy/lasr_object_detection_yolo/train/scripts/generate_config_files.py +++ /dev/null @@ -1,123 +0,0 @@ -import os -import sys -import random - -# validate input -if len(sys.argv) < 2 or len(sys.argv) > 3: - print('usage: python generate_config_files.py DATASET' + \ - ' [full path to lasr_darknet_config for use on different system]') - sys.exit() - -# get dataset name -dataset = sys.argv[1] - -# lasr_darknet_config path - -lasr_path = os.path.dirname(os.path.realpath(__file__)) if not os.path.exists("/.dockerenv") else "" - -# folder paths -cfg_path = lasr_path + '/config/' + dataset -img_path = lasr_path + '/datasets/' + dataset -bkp_path = lasr_path + '/weights/' + dataset - -# file paths -data_path = cfg_path + '/' + dataset + '.data' -train_path = cfg_path + '/train_' + dataset + '.txt' -valid_path = cfg_path + '/valid_' + dataset + '.txt' -names_path = img_path + '/classes.txt' -yv4_path = cfg_path + '/yolov4.cfg' -yv4tiny_path = cfg_path + '/yolov4-tiny.cfg' -yv4tiny_tplt_path = lasr_path + '/.template/yolov4-tiny.cfg' -yv4_tplt_path = lasr_path + '/.template/yolov4.cfg' - -# validate paths -# write something here!!!!
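# Editor's sketch (hypothetical; not part of the original file): the "validate
# paths" TODO above was never implemented. A minimal check consistent with the
# paths defined above would be:
#
#     if not os.path.isdir(img_path) or not os.path.isfile(names_path):
#         sys.exit('dataset not found: expected ' + img_path + ' containing classes.txt')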
- -# create dirs -if not os.path.exists(cfg_path): - os.makedirs(cfg_path) -if not os.path.exists(bkp_path): - os.makedirs(bkp_path) - -# get number of classes -classes = 0 -with open(names_path) as f: - for line in f: - classes += 1 - -# SET GEN PATHS -if len(sys.argv) == 3: - gen_path = sys.argv[2] -else: - gen_path = lasr_path - -# set gen paths -cfg_gen_path = gen_path + '/config/' + dataset -img_gen_path = gen_path + '/datasets/' + dataset -bkp_gen_path = gen_path + '/weights/' + dataset -train_gen_path = cfg_gen_path + '/train_' + dataset + '.txt' -valid_gen_path = cfg_gen_path + '/valid_' + dataset + '.txt' -names_gen_path = img_gen_path + '/classes.txt' - -# --- CREATE TRAIN AND VALID FILES --- # -train_file = open(train_path, 'w') -valid_file = open(valid_path, 'w') - -# find jpgs in "datasets" folder and add all paths to train and valid lists -for root, dirs, files in os.walk(img_path): - for filename in files: - if filename.endswith('.jpg'): - text_out = img_gen_path + '/' + filename + '\n' - # roughly one in ten files is a validation image - if random.randint(1, 10) > 1: - train_file.write(text_out) - else: - valid_file.write(text_out) - -train_file.close() -valid_file.close() - - - -# --- CREATE DATA FILE --- # -data_file = open(data_path, 'w') -data_file.write('classes = ' + str(classes) + '\n') -data_file.write('train = ' + train_gen_path + '\n') -data_file.write('valid = ' + valid_gen_path + '\n') -data_file.write('names = ' + names_gen_path + '\n') -data_file.write('backup = ' + bkp_gen_path + '\n') - - - -# --- CREATE YOLOV4 TINY AND YOLOV4 CFG FILES --- # -cfg_file = [open(yv4tiny_path, 'w'), open(yv4_path, 'w')] -templates = [yv4tiny_tplt_path, yv4_tplt_path] - -max_batches = classes * 2000 -filters = (classes + 5) * 3 - -for i in range(2): - filters_lines = [] - filters_last = 0 - - # first pass - find filters lines to be replaced - with open(templates[i]) as f: - for lineno, line in enumerate(f): - if line.startswith('filters'): - filters_last = lineno - elif line.startswith('[yolo]'): - filters_lines.append(filters_last) - - # second pass - copy lines to new cfg with replacement - with open(templates[i]) as f: - for lineno, line in enumerate(f): - if lineno in filters_lines: - cfg_file[i].write('filters=' + str(filters) + '\n') - elif line.startswith('classes'): - cfg_file[i].write('classes=' + str(classes) + '\n') - elif line.startswith('max_batches'): - cfg_file[i].write('max_batches=' + str(max_batches) + '\n') - elif line.startswith('steps='): - cfg_file[i].write('steps=' + str((max_batches // 10) * 8) + ',' + str((max_batches // 10) * 9) + '\n') - else: - cfg_file[i].write(line) diff --git a/legacy/lasr_object_detection_yolo/train/scripts/train.sh b/legacy/lasr_object_detection_yolo/train/scripts/train.sh deleted file mode 100755 index 4d1d0e62a..000000000 --- a/legacy/lasr_object_detection_yolo/train/scripts/train.sh +++ /dev/null @@ -1,4 +0,0 @@ -#!/bin/bash -DATASET=$1 -python scripts/generate_config_files.py $DATASET -./darknet/darknet detector train config/$DATASET/$DATASET.data config/$DATASET/yolov4.cfg pretrained_weights/yolov4.conv.137 \ No newline at end of file diff --git a/legacy/find_person_and_ask_open_door/src/find_person_and_ask_open_door/__init__.py b/legacy/lasr_shapely/CATKIN_IGNORE similarity index 100% rename from legacy/find_person_and_ask_open_door/src/find_person_and_ask_open_door/__init__.py rename to legacy/lasr_shapely/CATKIN_IGNORE diff --git a/legacy/lasr_web_server/README.md b/legacy/lasr_web_server/README.md deleted file mode 100644 index
4f19e002f..000000000 --- a/legacy/lasr_web_server/README.md +++ /dev/null @@ -1,63 +0,0 @@ -# lasr_web_server - -The lasr_web_server package - -This package is maintained by: -- [jared](mailto:jared@todo.todo) - -## Prerequisites - -This package depends on the following ROS packages: -- catkin (buildtool) - -- [rosbridge_suite](http://wiki.ros.org/rosbridge_suite) -- [web_video_server](http://wiki.ros.org/web_video_server) - -## Usage - -The web server will typically be run alongside the entire Interaction stack (see the Interaction README). -However, if you wish to run it in isolation, run the following: - -`roslaunch lasr_web_server web_server.launch` - -## Example - -Ask the package maintainer to write a `doc/EXAMPLE.md` for their package! - -## Technical Overview - -The following topics are maintained by the web server (see the rospy sketch further below): - -| Topic | Message Type | Functionality | -| ------------------------------- | --------------- | ---------------------------------------------------------------------- | -| /lasr_web_server/set_input_text | std_msgs/String | Publish to this topic to set the placeholder text of the input box. | -| /lasr_web_server/text_input | std_msgs/String | Publishes submitted user input. Subscribe to this topic to receive it. | - -The web server is built using the following technologies: - -- [roslibjs](http://wiki.ros.org/roslibjs) -- [rosbridge](http://wiki.ros.org/rosbridge_suite) -- WebSocket -- [Flask](https://flask.palletsprojects.com/en/2.2.x/) -- Bootstrap, HTML, CSS, JavaScript - -## ROS Definitions - -### Launch Files - -#### `web_server` - -No description provided. - - -### Messages - -This package has no messages. - -### Services - -This package has no services. - -### Actions - -This package has no actions. diff --git a/legacy/lasr_web_server/doc/PREREQUISITES.md b/legacy/lasr_web_server/doc/PREREQUISITES.md deleted file mode 100644 index 4d3efc7b7..000000000 --- a/legacy/lasr_web_server/doc/PREREQUISITES.md +++ /dev/null @@ -1,2 +0,0 @@ -- [rosbridge_suite](http://wiki.ros.org/rosbridge_suite) -- [web_video_server](http://wiki.ros.org/web_video_server) diff --git a/legacy/lasr_web_server/doc/TECHNICAL.md b/legacy/lasr_web_server/doc/TECHNICAL.md deleted file mode 100644 index b5ad815d5..000000000 --- a/legacy/lasr_web_server/doc/TECHNICAL.md +++ /dev/null @@ -1,14 +0,0 @@ -The following topics are maintained by the web server: - -| Topic | Message Type | Functionality | -| ------------------------------- | --------------- | ---------------------------------------------------------------------- | -| /lasr_web_server/set_input_text | std_msgs/String | Publish to this topic to set the placeholder text of the input box. | -| /lasr_web_server/text_input | std_msgs/String | Publishes submitted user input. Subscribe to this topic to receive it. | - -The web server is built using the following technologies: - -- [roslibjs](http://wiki.ros.org/roslibjs) -- [rosbridge](http://wiki.ros.org/rosbridge_suite) -- WebSocket -- [Flask](https://flask.palletsprojects.com/en/2.2.x/) -- Bootstrap, HTML, CSS, JavaScript diff --git a/legacy/lasr_web_server/doc/USAGE.md b/legacy/lasr_web_server/doc/USAGE.md deleted file mode 100644 index cc5c957f1..000000000 --- a/legacy/lasr_web_server/doc/USAGE.md +++ /dev/null @@ -1,4 +0,0 @@ -The web server will typically be run alongside the entire Interaction stack (see the Interaction README).
-However, if you wish to run it in isolation, run the following: - -`roslaunch lasr_web_server web_server.launch` diff --git a/legacy/lasr_web_server/launch/web_server.launch b/legacy/lasr_web_server/launch/web_server.launch deleted file mode 100644 index 18566d5ad..000000000 --- a/legacy/lasr_web_server/launch/web_server.launch +++ /dev/null @@ -1,8 +0,0 @@ -[roslaunch XML (8 lines) lost in extraction] \ No newline at end of file diff --git a/legacy/lasr_web_server/package.xml b/legacy/lasr_web_server/package.xml deleted file mode 100644 index 53f2194ab..000000000 --- a/legacy/lasr_web_server/package.xml +++ /dev/null @@ -1,59 +0,0 @@ -[package manifest XML lost in extraction; recoverable fields: name lasr_web_server, version 0.0.0, description "The lasr_web_server package", maintainer jared, license TODO, buildtool_depend catkin] diff --git a/legacy/lasr_web_server/setup.py b/legacy/lasr_web_server/setup.py deleted file mode 100644 index dedbe18df..000000000 --- a/legacy/lasr_web_server/setup.py +++ /dev/null @@ -1,12 +0,0 @@ - -#!/usr/bin/env python3 - -from distutils.core import setup -from catkin_pkg.python_setup import generate_distutils_setup - -setup_args = generate_distutils_setup( - packages=['lasr_web_server'], - package_dir={'': 'src'} -) - -setup(**setup_args) \ No newline at end of file diff --git a/legacy/lasr_web_server/src/lasr_web_server/__init__.py b/legacy/lasr_web_server/src/lasr_web_server/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/legacy/lasr_web_server/src/lasr_web_server/static/styles/styles.css b/legacy/lasr_web_server/src/lasr_web_server/static/styles/styles.css deleted file mode 100644 index 170ad0afc..000000000 --- a/legacy/lasr_web_server/src/lasr_web_server/static/styles/styles.css +++ /dev/null @@ -1,39 +0,0 @@ - /* Remove default bullets */ - ul, #myUL { - list-style-type: none; - } - - /* Remove margins and padding from the parent ul */ - #myUL { - margin: 0; - padding: 0; - } - - /* Style the caret/arrow */ - .caret { - cursor: pointer; - user-select: none; /* Prevent text selection */ - } - - /* Create the caret/arrow with a unicode, and style it */ - .caret::before { - content: "\25B6"; - color: black; - display: inline-block; - margin-right: 6px; - } - - /* Rotate the caret/arrow icon when clicked on (using JavaScript) */ - .caret-down::before { - transform: rotate(90deg); - } - - /* Hide the nested list */ - .nested { - display: none; - } - - /* Show the nested list when the user clicks on the caret/arrow (with JavaScript) */ - .active { - display: block; - } \ No newline at end of file diff --git a/legacy/lasr_web_server/src/lasr_web_server/templates/main.html b/legacy/lasr_web_server/src/lasr_web_server/templates/main.html deleted file mode 100644 index a28c3df74..000000000 --- a/legacy/lasr_web_server/src/lasr_web_server/templates/main.html +++ /dev/null @@ -1,289 +0,0 @@ -[HTML template (289 lines, "TIAGO Website") lost in extraction. Recoverable page content: a "TIAGo Webpage" header ("Provides text-based interaction as well as diagnostics and logs."), a collapsible "Topic Information" tree (Robot Pose: Position x/y/z and Orientation x/y/z/w; Clock: secs/nsecs; RGB Image), a "Text Interaction" input form, and "Conversation History" and "Logs" panels.]
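For quick reference, the two `std_msgs/String` topics documented in the README above can be exercised from any ROS node. A minimal rospy sketch (topic names and message type come from the tables above; the node name and prompt text are illustrative):

```python
#!/usr/bin/env python3
import rospy
from std_msgs.msg import String

rospy.init_node("web_server_demo")  # illustrative node name

# Set the placeholder text of the web page's input box.
prompt_pub = rospy.Publisher("/lasr_web_server/set_input_text", String, queue_size=1)

# Receive whatever the user submits through the web form.
def on_input(msg):
    rospy.loginfo("user typed: %s", msg.data)

rospy.Subscriber("/lasr_web_server/text_input", String, on_input)

rospy.sleep(1.0)  # give the publisher time to connect
prompt_pub.publish(String(data="Ask me something..."))
rospy.spin()
```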
- - diff --git a/legacy/lasr_web_server/src/lasr_web_server/web_server.py b/legacy/lasr_web_server/src/lasr_web_server/web_server.py deleted file mode 100755 index 16e05d1d7..000000000 --- a/legacy/lasr_web_server/src/lasr_web_server/web_server.py +++ /dev/null @@ -1,22 +0,0 @@ -#!/usr/bin/env python3 - -import rospy -import rospkg -from flask import Flask, render_template -from flask_socketio import SocketIO, emit - -import os - -TEMPLATE_ROOT = os.path.join(rospkg.RosPack().get_path("lasr_web_server"), "src", "lasr_web_server", "templates") -HOST = '0.0.0.0' -PORT = 4000 - -app = Flask(__name__, template_folder = TEMPLATE_ROOT) - -@app.route('/') -def render_main(): - return render_template("main.html") - - -if __name__ == "__main__": - app.run(host=HOST, port=PORT, debug=True) \ No newline at end of file diff --git a/legacy/listen_module/CMakeLists.txt b/legacy/listen_module/CMakeLists.txt deleted file mode 100644 index a7e36a2ee..000000000 --- a/legacy/listen_module/CMakeLists.txt +++ /dev/null @@ -1,207 +0,0 @@ -cmake_minimum_required(VERSION 3.0.2) -project(listen_module) - -## Compile as C++11, supported in ROS Kinetic and newer -# add_compile_options(-std=c++11) - -## Find catkin macros and libraries -## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz) -## is used, also find other catkin packages -find_package(catkin REQUIRED COMPONENTS - geometry_msgs - roscpp - rospy - std_msgs - message_generation -) - -## System dependencies are found with CMake's conventions -# find_package(Boost REQUIRED COMPONENTS system) - - -## Uncomment this if the package has a setup.py. This macro ensures -## modules and global scripts declared therein get installed -## See http://ros.org/doc/api/catkin/html/user_guide/setup_dot_py.html - catkin_python_setup() - -################################################ -## Declare ROS messages, services and actions ## -################################################ - -## To declare and build messages, services or actions from within this -## package, follow these steps: -## * Let MSG_DEP_SET be the set of packages whose message types you use in -## your messages/services/actions (e.g. std_msgs, actionlib_msgs, ...). -## * In the file package.xml: -## * add a build_depend tag for "message_generation" -## * add a build_depend and a exec_depend tag for each package in MSG_DEP_SET -## * If MSG_DEP_SET isn't empty the following dependency has been pulled in -## but can be declared for certainty nonetheless: -## * add a exec_depend tag for "message_runtime" -## * In this file (CMakeLists.txt): -## * add "message_generation" and every package in MSG_DEP_SET to -## find_package(catkin REQUIRED COMPONENTS ...) -## * add "message_runtime" and every package in MSG_DEP_SET to -## catkin_package(CATKIN_DEPENDS ...) -## * uncomment the add_*_files sections below as needed -## and list every .msg/.srv/.action file to be processed -## * uncomment the generate_messages entry below -## * add every package in MSG_DEP_SET to generate_messages(DEPENDENCIES ...) 
- -## Generate messages in the 'msg' folder -# add_message_files( -# FILES -# Message1.msg -# Message2.msg -# ) - -## Generate services in the 'srv' folder - add_service_files( - FILES - DialogListen.srv - ) - -## Generate actions in the 'action' folder -# add_action_files( -# FILES -# Action1.action -# Action2.action -# ) - -## Generate added messages and services with any dependencies listed here - generate_messages( - DEPENDENCIES - geometry_msgs std_msgs -) - -################################################ -## Declare ROS dynamic reconfigure parameters ## -################################################ - -## To declare and build dynamic reconfigure parameters within this -## package, follow these steps: -## * In the file package.xml: -## * add a build_depend and a exec_depend tag for "dynamic_reconfigure" -## * In this file (CMakeLists.txt): -## * add "dynamic_reconfigure" to -## find_package(catkin REQUIRED COMPONENTS ...) -## * uncomment the "generate_dynamic_reconfigure_options" section below -## and list every .cfg file to be processed - -## Generate dynamic reconfigure parameters in the 'cfg' folder -# generate_dynamic_reconfigure_options( -# cfg/DynReconf1.cfg -# cfg/DynReconf2.cfg -# ) - -################################### -## catkin specific configuration ## -################################### -## The catkin_package macro generates cmake config files for your package -## Declare things to be passed to dependent projects -## INCLUDE_DIRS: uncomment this if your package contains header files -## LIBRARIES: libraries you create in this project that dependent projects also need -## CATKIN_DEPENDS: catkin_packages dependent projects also need -## DEPENDS: system dependencies of this project that dependent projects also need -catkin_package( -# INCLUDE_DIRS include -# LIBRARIES listen_module -# CATKIN_DEPENDS geometry_msgs roscpp rospy std_msgs -# DEPENDS system_lib -) - -########### -## Build ## -########### - -## Specify additional locations of header files -## Your package locations should be listed before other locations -include_directories( -# include - ${catkin_INCLUDE_DIRS} -) - -## Declare a C++ library -# add_library(${PROJECT_NAME} -# src/${PROJECT_NAME}/listen_module.cpp -# ) - -## Add cmake target dependencies of the library -## as an example, code may need to be generated before libraries -## either from message generation or dynamic reconfigure -# add_dependencies(${PROJECT_NAME} ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS}) - -## Declare a C++ executable -## With catkin_make all packages are built within a single CMake context -## The recommended prefix ensures that target names across packages don't collide -# add_executable(${PROJECT_NAME}_node src/listen_module_node.cpp) - -## Rename C++ executable without prefix -## The above recommended prefix causes long target names, the following renames the -## target back to the shorter version for ease of user use -## e.g. 
"rosrun someones_pkg node" instead of "rosrun someones_pkg someones_pkg_node" -# set_target_properties(${PROJECT_NAME}_node PROPERTIES OUTPUT_NAME node PREFIX "") - -## Add cmake target dependencies of the executable -## same as for the library above -# add_dependencies(${PROJECT_NAME}_node ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS}) - -## Specify libraries to link a library or executable target against -# target_link_libraries(${PROJECT_NAME}_node -# ${catkin_LIBRARIES} -# ) - -############# -## Install ## -############# - -# all install targets should use catkin DESTINATION variables -# See http://ros.org/doc/api/catkin/html/adv_user_guide/variables.html - -## Mark executable scripts (Python etc.) for installation -## in contrast to setup.py, you can choose the destination -# catkin_install_python(PROGRAMS -# scripts/my_python_script -# DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} -# ) - -## Mark executables for installation -## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_executables.html -# install(TARGETS ${PROJECT_NAME}_node -# RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} -# ) - -## Mark libraries for installation -## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_libraries.html -# install(TARGETS ${PROJECT_NAME} -# ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} -# LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} -# RUNTIME DESTINATION ${CATKIN_GLOBAL_BIN_DESTINATION} -# ) - -## Mark cpp header files for installation -# install(DIRECTORY include/${PROJECT_NAME}/ -# DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION} -# FILES_MATCHING PATTERN "*.h" -# PATTERN ".svn" EXCLUDE -# ) - -## Mark other files for installation (e.g. launch and bag files, etc.) -# install(FILES -# # myfile1 -# # myfile2 -# DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION} -# ) - -############# -## Testing ## -############# - -## Add gtest based cpp test target and link libraries -# catkin_add_gtest(${PROJECT_NAME}-test test/test_listen_module.cpp) -# if(TARGET ${PROJECT_NAME}-test) -# target_link_libraries(${PROJECT_NAME}-test ${PROJECT_NAME}) -# endif() - -## Add folders to be run by python nosetests -# catkin_add_nosetests(test) diff --git a/legacy/listen_module/README.md b/legacy/listen_module/README.md deleted file mode 100644 index 81c6623e5..000000000 --- a/legacy/listen_module/README.md +++ /dev/null @@ -1,67 +0,0 @@ -# listen_module - -The listen_module package - -This package is maintained by: -- [nicole](mailto:nicole@todo.todo) - -## Prerequisites - -This package depends on the following ROS packages: -- catkin (buildtool) -- geometry_msgs (build) -- roscpp (build) -- rospy (build) -- std_msgs (build) -- geometry_msgs (exec) -- roscpp (exec) -- rospy (exec) -- std_msgs (exec) - -Ask the package maintainer to write or create a blank `doc/PREREQUISITES.md` for their package! - -## Usage - -Ask the package maintainer to write a `doc/USAGE.md` for their package! - -## Example - -Ask the package maintainer to write a `doc/EXAMPLE.md` for their package! - -## Technical Overview - -Ask the package maintainer to write a `doc/TECHNICAL.md` for their package! - -## ROS Definitions - -### Launch Files - -This package has no launch files. - -### Messages - -This package has no messages. 
- -### Services - -#### `DialogListen` - -Request - -| Field | Type | Description | -|:-:|:-:|---| -| action | string | | -| subaction | string | | -| query_text | string | | - -Response - -| Field | Type | Description | -|:-:|:-:|---| -| status | bool | | -| result | string | | - - -### Actions - -This package has no actions. diff --git a/legacy/listen_module/__init__.py b/legacy/listen_module/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/legacy/listen_module/package.xml b/legacy/listen_module/package.xml deleted file mode 100644 index 24111eb56..000000000 --- a/legacy/listen_module/package.xml +++ /dev/null @@ -1,71 +0,0 @@ -[package manifest XML lost in extraction; recoverable fields: name listen_module, version 0.0.0, description "The listen_module package", maintainer nicole, license TODO, buildtool_depend catkin, build and exec depends geometry_msgs, roscpp, rospy, std_msgs] diff --git a/legacy/listen_module/setup.py b/legacy/listen_module/setup.py deleted file mode 100644 index e38e9fd5e..000000000 --- a/legacy/listen_module/setup.py +++ /dev/null @@ -1,11 +0,0 @@ -#!/usr/bin/env python3 - -from distutils.core import setup -from catkin_pkg.python_setup import generate_distutils_setup - -setup_args = generate_distutils_setup( - packages=['listen_module'], - package_dir={'': 'src'} -) - -setup(**setup_args) \ No newline at end of file diff --git a/legacy/listen_module/src/__init__.py b/legacy/listen_module/src/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/legacy/listen_module/src/listen_module/__init__.py b/legacy/listen_module/src/listen_module/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/legacy/listen_module/src/listen_module/dialogflow_client.py b/legacy/listen_module/src/listen_module/dialogflow_client.py deleted file mode 100755 index 499d97d03..000000000 --- a/legacy/listen_module/src/listen_module/dialogflow_client.py +++ /dev/null @@ -1,75 +0,0 @@ -#!/usr/bin/env python3 -import rospy, os, rospkg, uuid -from stream_handler import StreamHandler -import sounddevice as sd -from google.cloud import dialogflow_v2 as dialogflow - -# Inspired by the LASR team dialogflow package! Thank you for teaching me so much!
- -INPUT_DEVICE = sd.query_hostapis()[0]['default_input_device'] -DEVICE_INFO = sd.query_devices(INPUT_DEVICE, 'input') -RATE = int(DEVICE_INFO["default_samplerate"]) # Sampling rate in Hz -CHANNELS = 1 # Number of channels -BLOCK_SIZE = 4096 # Number of frames per block -FORMAT = "int16" # Data type of audio samples -LANGUAGE_CODE = "en-GB" # Language code of the query - - -class DialogClient: - def __init__(self, project_id, session_id=str(uuid.uuid4())): - self.project_id = project_id - self.project_credentials = os.path.join(rospkg.RosPack().get_path('listen_module'), 'config', - '{}.json'.format(self.project_id)) - self.session_id = session_id - self.session_client = dialogflow.SessionsClient.from_service_account_file(self.project_credentials) - self.session = self.session_client.session_path(self.project_id, self.session_id) - self.session_stream = None - - def detect_intent(self, audio_or_text, query_params=None): - if audio_or_text == "SOUND:PLAYING:PLEASE": - self.session_stream = StreamHandler(self.session_client, self.session) - return self.detect_audio(query_params) - else: - return self.detect_text(audio_or_text, query_params) - - def detect_text(self, text, query_params=None): - text_input = dialogflow.types.TextInput(text=text, language_code=LANGUAGE_CODE) - query_input = dialogflow.types.QueryInput(text=text_input) - request = dialogflow.DetectIntentRequest( - session=self.session, - query_input=query_input, - query_params=query_params - ) - response = self.session_client.detect_intent(request=request) - print(response, response.query_result.fulfillment_text, response.query_result.intent.display_name, - "resp in text") - return response - - - def detect_audio(self, query_params=None): - responses = self.session_stream.get_responses(self.session_stream, query_params) - response = None - for response in responses: - print(response, "response in audio") - if response.recognition_result.message_type == dialogflow.StreamingRecognitionResult.MessageType.END_OF_SINGLE_UTTERANCE: - print("End of single utterance") - self.stop() - if not response.query_result.query_text: - print("No query text") - return response - - def stop(self): - self.session_stream.stop() - - -if __name__ == '__main__': - rospy.init_node('dialogflow_client') - try: - client = DialogClient("newagent-groe") - # client.detect_intent("CAn you press button 7") - client.detect_intent("SOUND:PLAYING:PLEASE") - - - except Exception as e: - print(e) - print("Shutting down") diff --git a/legacy/listen_module/src/listen_module/dialogflow_srv.py b/legacy/listen_module/src/listen_module/dialogflow_srv.py deleted file mode 100755 index 4a991f69c..000000000 --- a/legacy/listen_module/src/listen_module/dialogflow_srv.py +++ /dev/null @@ -1,77 +0,0 @@ -#!/usr/bin/env python3 -import rospy -from listen_module.srv import DialogListen, DialogListenRequest, DialogListenResponse -from listen_module.subactions import ButtonAction, NavigationAction -import sounddevice as sd -import os, rospkg, re -from proto.marshal.collections.maps import MapComposite - - -# Audio recording parameters -INPUT_DEVICE = sd.query_hostapis()[0]['default_input_device'] -DEVICE_INFO = sd.query_devices(INPUT_DEVICE, 'input') -RATE = int(DEVICE_INFO["default_samplerate"]) # Sampling rate in Hz -CHANNELS = 1 # Number of channels -BLOCK_SIZE = 4096 # Number of frames per block -FORMAT = "int16" # Data type of audio samples -LANGUAGE_CODE = "en-GB" # Language code of the query - -RECORD_SECONDS = 10 -GOOGLE_APPLICATION_CREDENTIALS = 
os.path.join(rospkg.RosPack().get_path('listen_module'), 'config', - '{}.json'.format('newagent-groe')) - - -class DialogflowServer: - def __init__(self): - self.handler = { - "BUTTON_PRESSED": { - "id": "newagent-groe", - "action": ButtonAction - - }, - "ROOM_REQUEST": { - "id": "newagent-groe", - "action": NavigationAction - } - } - self.dialog_srv = rospy.Service("/dialogflow_server", DialogListen, self.handle_audio_cb) - self.stream = sd.RawInputStream(samplerate=RATE, blocksize=BLOCK_SIZE, device=INPUT_DEVICE, - dtype=FORMAT, - channels=CHANNELS) - - - - def handle_audio_cb(self, request: DialogListenRequest): - rospy.loginfo("Received request in dialog server") - rospy.loginfo(request.query_text) - resp = DialogListenResponse() - - - action_id = self.handler[request.action]["id"] - action = self.handler[request.action]["action"](action_id=action_id) - result = action.subaction[request.subaction](audio_or_text=request.query_text) # query_text may be typed text or the audio trigger string - print(action, result, "action, subaction, result") - try: - if result: - resp.status = True - if isinstance(result, MapComposite) or isinstance(result, dict): - result = dict(result) - print(result, "result" + ("-" * 100)) - result = str(result["name"]) - print(result, "result" + ("-" * 100)) - if re.search(r'^[-+]?[0-9]*\.?[0-9]+$', str(result)) or result.isdigit(): - result = float(result) - print(result) - resp.result = str(result) - except Exception as e: - rospy.logerr(e) - resp.status = False - resp.result = str("Error in handling audio") - return resp - - -if __name__ == '__main__': - rospy.init_node('dialogflow_server') - server = DialogflowServer() - rospy.loginfo("Dialogflow server is ready to take input") - rospy.spin() diff --git a/legacy/listen_module/src/listen_module/stream_handler.py b/legacy/listen_module/src/listen_module/stream_handler.py deleted file mode 100755 index 69bdf9655..000000000 --- a/legacy/listen_module/src/listen_module/stream_handler.py +++ /dev/null @@ -1,130 +0,0 @@ -#!/usr/bin/env python3 -import sounddevice as sd -from proto.marshal.collections.maps import MapComposite -from google.cloud import dialogflow_v2 as dialogflow -import rospkg, os, rospy, time, uuid - -# Define some parameters for the audio stream -INPUT_DEVICE = sd.query_hostapis()[0]['default_input_device'] -print(INPUT_DEVICE) -DEVICE_INFO = sd.query_devices(INPUT_DEVICE, 'input') -RATE = int(DEVICE_INFO["default_samplerate"]) # Sampling rate in Hz (ALSA may print error messages while probing the device) -RATE = 22050 # hard-coded override of the device default -print(RATE) -CHANNELS = 1 # Number of channels -BLOCK_SIZE = 4096 # Number of frames per block -FORMAT = "int16" # Data type of audio samples -LANGUAGE_CODE = "en-GB" # Language code of the query -GOOGLE_APPLICATION_CREDENTIALS = os.path.join(rospkg.RosPack().get_path('listen_module'), 'config', - '{}.json'.format('newagent-groe')) -# GOOGLE_APPLICATION_CREDENTIALS = "/home/nicole/robocup/rexy/src/rexy/listen_module/config/newagent-groe.json" - -class StreamHandler: - def __init__(self, session_client, session): - self.session = session - self.session_client = session_client - self.stop_requested = False - self.query_params = None - - - def stop(self): - self.stop_requested = True - - def audio_generator(self): - start_time = time.time() - rospy.loginfo("Starting stream handler") - with sd.RawInputStream(samplerate=RATE, channels=CHANNELS, dtype=FORMAT) as self.stream: - while self.stream.active: - if self.stop_requested or rospy.is_shutdown() or time.time() - start_time > 10000: - rospy.loginfo("Stopping stream handler") - break - data,
overflowed = self.stream.read(BLOCK_SIZE) - if overflowed: - print("Warning: some audio data was lost") - yield bytes(data) - - # Define a generator function that yields Dialogflow requests - def request_generator(self): - # Create the initial request with the session and query input - request = dialogflow.StreamingDetectIntentRequest( - session=self.session, - query_params=self.query_params, - query_input=dialogflow.QueryInput( - audio_config=dialogflow.InputAudioConfig( - audio_encoding=dialogflow.AudioEncoding.AUDIO_ENCODING_LINEAR_16, - sample_rate_hertz=RATE, - language_code=LANGUAGE_CODE, - single_utterance=True, - ), - ), - ) - yield request - - # Loop over the audio blocks from the microphone - for input_audio in self.audio_generator(): - # Create a request with the input audio - request = dialogflow.StreamingDetectIntentRequest(input_audio=input_audio) - yield request - - - def get_responses(self, st, query_params=None): - # send the requests to the dialogflow api and get the responses - try: - self.query_params = query_params - self.stop_requested = False - responses = st.session_client.streaming_detect_intent(st.request_generator()) - self.query_params = None - return responses - except Exception as e: - rospy.loginfo(str(e) + " error in request of stream handler!") - -if __name__ == '__main__': - - rospy.init_node('stream_handler') - project_id = 'newagent-groe' - session_id = str(uuid.uuid4()) - # project_credentials = os.path.join(rospkg.rospack().get_path('listen_module'), 'config', - # '{}.json'.format(project_id)) - project_credentials = "/home/nicole/robocup/rexy/src/rexy/listen_module/config/newagent-groe.json" - session_client = dialogflow.SessionsClient.from_service_account_file(project_credentials) - session = session_client.session_path(project_id, session_id) - st = StreamHandler(session_client, session) - # send the requests to the dialogflow api and get the responses - try: - responses = st.session_client.streaming_detect_intent(st.request_generator()) - print(responses) - - print("=" * 20) - try: - for response in responses: - print( - 'intermediate transcript: "{}".'.format( - response.recognition_result.transcript - ) - ) - - # note: the result from the last response is the final transcript along - # with the detected content. 
- -query_result = response.query_result - -print("=" * 20) -print("query text: {}".format(query_result.query_text)) -print( -"detected intent: {} (confidence: {})\n".format( -query_result.intent.display_name, query_result.intent_detection_confidence -) -) -print("fulfillment text: {}\n".format(query_result.fulfillment_text)) -# get the parameters -parameters = query_result.parameters -if parameters: -for parameter_name, parameter_value in parameters.items(): -print(f"parameter: {parameter_name} = {parameter_value}") -if isinstance(parameter_value, MapComposite): -parameter_values = dict(parameter_value) -print(parameter_values) -except Exception as e: -print(e, "error in response") -except Exception as e: -rospy.loginfo(str(e) + " error in request of stream handler!") - diff --git a/legacy/listen_module/src/listen_module/subactions/__init__.py b/legacy/listen_module/src/listen_module/subactions/__init__.py deleted file mode 100644 index f75913906..000000000 --- a/legacy/listen_module/src/listen_module/subactions/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from .base import BaseActions -from .navigationAction import NavigationAction -from .button import ButtonAction \ No newline at end of file diff --git a/legacy/listen_module/src/listen_module/subactions/base.py b/legacy/listen_module/src/listen_module/subactions/base.py deleted file mode 100755 index c2b829b60..000000000 --- a/legacy/listen_module/src/listen_module/subactions/base.py +++ /dev/null @@ -1,50 +0,0 @@ -#!/usr/bin/env python3 -from google.cloud import dialogflow_v2 as dialogflow -from listen_module.dialogflow_client import DialogClient -from proto.marshal.collections.maps import MapComposite -import uuid - - -class BaseActions: - def __init__(self, action_id): - self.action_id = action_id - self.client = DialogClient(action_id) - self.session_id = uuid.uuid4() - - def get_context(self, context, lifespan=1): - return dialogflow.types.Context( - name=dialogflow.ContextsClient.context_path(self.action_id, self.session_id, context), - lifespan_count=lifespan) - - def dialog_in_context(self, audio_or_text: str = "SOUND:PLAYING:PLEASE", context: str = None): - query_params = None - if context: - query_params = dialogflow.types.QueryParameters(contexts=[self.get_context(context)]) - return self.client.detect_intent(audio_or_text=audio_or_text, query_params=query_params) - - - def get_parameters(self, response): - resp = [] - if response: - res = {} - query_result = response.query_result - query_text = query_result.query_text - parameters = query_result.parameters - for parameter_name, parameter_value in parameters.items(): - if isinstance(parameter_value, MapComposite): - parameter_values = dict(parameter_value) - resp.append((parameter_name, parameter_values)) - else: - resp.append((parameter_name, parameter_value)) - - res["query_text"] = query_text - res["parameters"] = parameters - res["query_result"] = query_result - res["response"] = resp - print(res, "res in audio") - return res - - def stop(self): - self.client.stop() diff --git a/legacy/listen_module/src/listen_module/subactions/button.py b/legacy/listen_module/src/listen_module/subactions/button.py deleted file mode 100755 index 6c479eb67..000000000 --- a/legacy/listen_module/src/listen_module/subactions/button.py +++ /dev/null @@ -1,34 +0,0 @@ -#!/usr/bin/env python3 - -import rospy -from listen_module.subactions import BaseActions - -class ButtonAction(BaseActions): - def
__init__(self, action_id): - super(ButtonAction, self).__init__(action_id) - - self.subaction = { - "confirm_button": self.confirm_button, - "confirm_floor": self.ask_floor, - } - - def ask_floor(self, audio_or_text): - return self.handle_dialog_in_context(self.dialog_in_context(audio_or_text=audio_or_text, context="ConfirmFloor"), - context="ConfirmFloor") - - def confirm_button(self, audio_or_text): - return self.handle_dialog_in_context( - self.dialog_in_context(audio_or_text=audio_or_text, context="ConfirmButton"), context="ConfirmButton") - - @staticmethod - def handle_dialog_in_context(resp, context): - try: - # confirm button or floor - if context == "ConfirmButton" or context == "ConfirmFloor": - return resp.query_result.parameters["yesno"] - else: - rospy.loginfo("No context found in the button action") - raise ValueError - except ValueError: - rospy.loginfo("I am not sure if the buttons are pressed and the floor is selected.") - return -1 diff --git a/legacy/listen_module/src/listen_module/subactions/navigationAction.py b/legacy/listen_module/src/listen_module/subactions/navigationAction.py deleted file mode 100755 index 566a06785..000000000 --- a/legacy/listen_module/src/listen_module/subactions/navigationAction.py +++ /dev/null @@ -1,38 +0,0 @@ -#!/usr/bin/env python3 - -import rospy -from listen_module.subactions import BaseActions - - -class NavigationAction(BaseActions): - def __init__(self, action_id): - super(NavigationAction, self).__init__(action_id) - - self.subaction = { - "confirm_location": self.confirm_location, - "ask_location": self.ask_location, - } - - def ask_location(self, audio_or_text): - return self.handle_dialog_in_context(self.dialog_in_context(audio_or_text=audio_or_text, context="AskLocation"), - context="AskLocation") - - def confirm_location(self, audio_or_text): - return self.handle_dialog_in_context( - self.dialog_in_context(audio_or_text=audio_or_text, context="ConfirmLocation"), context="ConfirmLocation") - - @staticmethod - def handle_dialog_in_context(resp, context): - try: - # number, person, Destination - if context == "ConfirmLocation": - return resp.query_result.parameters["yesno"] - elif context == "AskLocation": - print(resp.query_result) - return resp.query_result.parameters["Destination"] or resp.query_result.parameters["number"] or \ - resp.query_result.parameters["person"] - else: - rospy.loginfo("No context found in the navigation action") - raise ValueError - except ValueError: - print("I don't know where you want to go.") diff --git a/legacy/listen_module/srv/DialogListen.srv b/legacy/listen_module/srv/DialogListen.srv deleted file mode 100644 index 8fb8ea81c..000000000 --- a/legacy/listen_module/srv/DialogListen.srv +++ /dev/null @@ -1,7 +0,0 @@ -string action -string subaction -string query_text ---- -bool status -string result - diff --git a/legacy/markers/README.md b/legacy/markers/README.md deleted file mode 100644 index 5fca57db9..000000000 --- a/legacy/markers/README.md +++ /dev/null @@ -1,52 +0,0 @@ -# markers - -The markers package - -This package is maintained by: -- [nicole](mailto:nicole@todo.todo) - -## Prerequisites - -This package depends on the following ROS packages: -- catkin (buildtool) -- geometry_msgs (build) -- message_generation (build) -- roscpp (build) -- rospy (build) -- std_msgs (build) -- geometry_msgs (exec) -- roscpp (exec) -- rospy (exec) -- std_msgs (exec) - -Ask the package maintainer to write or create a blank `doc/PREREQUISITES.md` for their package!
- -## Usage - -Ask the package maintainer to write a `doc/USAGE.md` for their package! - -## Example - -Ask the package maintainer to write a `doc/EXAMPLE.md` for their package! - -## Technical Overview - -Ask the package maintainer to write a `doc/TECHNICAL.md` for their package! - -## ROS Definitions - -### Launch Files - -This package has no launch files. - -### Messages - -This package has no messages. - -### Services - -This package has no services. - -### Actions - -This package has no actions. diff --git a/legacy/markers/setup.py b/legacy/markers/setup.py deleted file mode 100644 index b3f12485e..000000000 --- a/legacy/markers/setup.py +++ /dev/null @@ -1,12 +0,0 @@ -#!/usr/bin/env python - -from distutils.core import setup -from catkin_pkg.python_setup import generate_distutils_setup - -# fetch values from package.xml -setup_args = generate_distutils_setup( - packages=['markers'], - package_dir={'': 'src'} -) - -setup(**setup_args) \ No newline at end of file diff --git a/legacy/markers/src/markers/__init__.py b/legacy/markers/src/markers/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/legacy/markers/src/markers/markers_helpers.py b/legacy/markers/src/markers/markers_helpers.py deleted file mode 100755 index 8b2c15b44..000000000 --- a/legacy/markers/src/markers/markers_helpers.py +++ /dev/null @@ -1,57 +0,0 @@ -#!/usr/bin/env python3 -import rospy -from visualization_msgs.msg import Marker - -def create_point_marker(x, y, z, idx, g=1.0): - marker_msg = Marker() - marker_msg.header.frame_id = "map" - marker_msg.header.stamp = rospy.Time.now() - marker_msg.id = idx - marker_msg.type = Marker.SPHERE - marker_msg.action = Marker.ADD - marker_msg.pose.position.x = x - marker_msg.pose.position.y = y - marker_msg.pose.position.z = z - marker_msg.pose.orientation.w = 1.0 - marker_msg.scale.x = 0.1 - marker_msg.scale.y = 0.1 - marker_msg.scale.z = 0.1 - marker_msg.color.a = 1.0 - marker_msg.color.r = 0.0 - marker_msg.color.g = g - marker_msg.color.b = 0.0 - return marker_msg - -if __name__ == '__main__': - rospy.init_node("viz") - - wait_pos = rospy.Publisher("/wait_pos_debug", Marker, queue_size=100) - wait_points = rospy.get_param("/wait_position_points") - for i, point in enumerate(wait_points): - print(i, point, "wait") - rospy.sleep(1) - wait_pos.publish(create_point_marker(point[0], point[1], 0, i,g=0.5)) - - - # wait_points_cen = rospy.get_param("/wait_position_center_point") - # wait_pos_cen = rospy.Publisher("/wait_position_center_point_debugger", Marker, queue_size=100) - # for point in wait_points_cen: - # print(point) - # wait_pos_cen.publish(create_point_marker(point[0], point[1], 0, 0)) - - - # lift_pos = rospy.Publisher("/lift_pos_debugger", Marker, queue_size=100) - # lift_pose = rospy.get_param("/lift_position_points") - # for i, point in enumerate(lift_pose): - # rospy.sleep(1) - # lift_pos.publish(create_point_marker(point[0], point[1], 0, i)) - - # lift_center = rospy.get_param("/lift_position_center_point") - # print(lift_center) - # lift_cen = rospy.Publisher("/lift_pos_center_debugger", Marker, queue_size=100) - # # for i, point in enumerate(lift_center): - # # print(i, point,"center lift") - # lift_cen.publish(create_point_marker(lift_center[0], lift_center[1], 0, 100, g=0.1)) - - while not rospy.is_shutdown(): - rospy.spin() diff --git a/legacy/meet_and_greet/CMakeLists.txt b/legacy/meet_and_greet/CMakeLists.txt deleted file mode 100644 index e07fb36af..000000000 --- a/legacy/meet_and_greet/CMakeLists.txt +++ /dev/null @@ -1,206 +0,0 @@ 
-cmake_minimum_required(VERSION 3.0.2) -project(meet_and_greet) - -## Compile as C++11, supported in ROS Kinetic and newer -# add_compile_options(-std=c++11) - -## Find catkin macros and libraries -## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz) -## is used, also find other catkin packages -find_package(catkin REQUIRED COMPONENTS - roscpp - rospy - std_msgs -) - -## System dependencies are found with CMake's conventions -# find_package(Boost REQUIRED COMPONENTS system) - - -## Uncomment this if the package has a setup.py. This macro ensures -## modules and global scripts declared therein get installed -## See http://ros.org/doc/api/catkin/html/user_guide/setup_dot_py.html -catkin_python_setup() - -################################################ -## Declare ROS messages, services and actions ## -################################################ - -## To declare and build messages, services or actions from within this -## package, follow these steps: -## * Let MSG_DEP_SET be the set of packages whose message types you use in -## your messages/services/actions (e.g. std_msgs, actionlib_msgs, ...). -## * In the file package.xml: -## * add a build_depend tag for "message_generation" -## * add a build_depend and a exec_depend tag for each package in MSG_DEP_SET -## * If MSG_DEP_SET isn't empty the following dependency has been pulled in -## but can be declared for certainty nonetheless: -## * add a exec_depend tag for "message_runtime" -## * In this file (CMakeLists.txt): -## * add "message_generation" and every package in MSG_DEP_SET to -## find_package(catkin REQUIRED COMPONENTS ...) -## * add "message_runtime" and every package in MSG_DEP_SET to -## catkin_package(CATKIN_DEPENDS ...) -## * uncomment the add_*_files sections below as needed -## and list every .msg/.srv/.action file to be processed -## * uncomment the generate_messages entry below -## * add every package in MSG_DEP_SET to generate_messages(DEPENDENCIES ...) - -## Generate messages in the 'msg' folder -# add_message_files( -# FILES -# Message1.msg -# Message2.msg -# ) - -## Generate services in the 'srv' folder -# add_service_files( -# FILES -# Service1.srv -# Service2.srv -# ) - -## Generate actions in the 'action' folder -# add_action_files( -# FILES -# Action1.action -# Action2.action -# ) - -## Generate added messages and services with any dependencies listed here -# generate_messages( -# DEPENDENCIES -# std_msgs -# ) - -################################################ -## Declare ROS dynamic reconfigure parameters ## -################################################ - -## To declare and build dynamic reconfigure parameters within this -## package, follow these steps: -## * In the file package.xml: -## * add a build_depend and a exec_depend tag for "dynamic_reconfigure" -## * In this file (CMakeLists.txt): -## * add "dynamic_reconfigure" to -## find_package(catkin REQUIRED COMPONENTS ...) 
-## * uncomment the "generate_dynamic_reconfigure_options" section below -## and list every .cfg file to be processed - -## Generate dynamic reconfigure parameters in the 'cfg' folder -# generate_dynamic_reconfigure_options( -# cfg/DynReconf1.cfg -# cfg/DynReconf2.cfg -# ) - -################################### -## catkin specific configuration ## -################################### -## The catkin_package macro generates cmake config files for your package -## Declare things to be passed to dependent projects -## INCLUDE_DIRS: uncomment this if your package contains header files -## LIBRARIES: libraries you create in this project that dependent projects also need -## CATKIN_DEPENDS: catkin_packages dependent projects also need -## DEPENDS: system dependencies of this project that dependent projects also need -catkin_package( -# INCLUDE_DIRS include -# LIBRARIES meet_and_greet -# CATKIN_DEPENDS roscpp rospy std_msgs -# DEPENDS system_lib -) - -########### -## Build ## -########### - -## Specify additional locations of header files -## Your package locations should be listed before other locations -include_directories( -# include - ${catkin_INCLUDE_DIRS} -) - -## Declare a C++ library -# add_library(${PROJECT_NAME} -# src/${PROJECT_NAME}/meet_and_greet.cpp -# ) - -## Add cmake target dependencies of the library -## as an example, code may need to be generated before libraries -## either from message generation or dynamic reconfigure -# add_dependencies(${PROJECT_NAME} ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS}) - -## Declare a C++ executable -## With catkin_make all packages are built within a single CMake context -## The recommended prefix ensures that target names across packages don't collide -# add_executable(${PROJECT_NAME}_node src/meet_and_greet_node.cpp) - -## Rename C++ executable without prefix -## The above recommended prefix causes long target names, the following renames the -## target back to the shorter version for ease of user use -## e.g. "rosrun someones_pkg node" instead of "rosrun someones_pkg someones_pkg_node" -# set_target_properties(${PROJECT_NAME}_node PROPERTIES OUTPUT_NAME node PREFIX "") - -## Add cmake target dependencies of the executable -## same as for the library above -# add_dependencies(${PROJECT_NAME}_node ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS}) - -## Specify libraries to link a library or executable target against -# target_link_libraries(${PROJECT_NAME}_node -# ${catkin_LIBRARIES} -# ) - -############# -## Install ## -############# - -# all install targets should use catkin DESTINATION variables -# See http://ros.org/doc/api/catkin/html/adv_user_guide/variables.html - -## Mark executable scripts (Python etc.) 
for installation -## in contrast to setup.py, you can choose the destination -# catkin_install_python(PROGRAMS -# scripts/my_python_script -# DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} -# ) - -## Mark executables for installation -## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_executables.html -# install(TARGETS ${PROJECT_NAME}_node -# RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} -# ) - -## Mark libraries for installation -## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_libraries.html -# install(TARGETS ${PROJECT_NAME} -# ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} -# LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} -# RUNTIME DESTINATION ${CATKIN_GLOBAL_BIN_DESTINATION} -# ) - -## Mark cpp header files for installation -# install(DIRECTORY include/${PROJECT_NAME}/ -# DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION} -# FILES_MATCHING PATTERN "*.h" -# PATTERN ".svn" EXCLUDE -# ) - -## Mark other files for installation (e.g. launch and bag files, etc.) -# install(FILES -# # myfile1 -# # myfile2 -# DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION} -# ) - -############# -## Testing ## -############# - -## Add gtest based cpp test target and link libraries -# catkin_add_gtest(${PROJECT_NAME}-test test/test_meet_and_greet.cpp) -# if(TARGET ${PROJECT_NAME}-test) -# target_link_libraries(${PROJECT_NAME}-test ${PROJECT_NAME}) -# endif() - -## Add folders to be run by python nosetests -# catkin_add_nosetests(test) diff --git a/legacy/meet_and_greet/README.md b/legacy/meet_and_greet/README.md deleted file mode 100644 index efc8f077d..000000000 --- a/legacy/meet_and_greet/README.md +++ /dev/null @@ -1,67 +0,0 @@ -# meet_and_greet - -The meet_and_greet package - -This package is maintained by: -- [nicole](mailto:nicole@todo.todo) - -## Prerequisites - -This package depends on the following ROS packages: -- catkin (buildtool) -- roscpp (build) -- rospy (build) -- std_msgs (build) -- roscpp (exec) -- rospy (exec) -- std_msgs (exec) - -Ask the package maintainer to write or create a blank `doc/PREREQUISITES.md` for their package! - -## Usage - -> How to run - -```bash -roslaunch lasr_perception_server perception_server.launch -rosrun meet_and_greet simple_meet_and_greet.py - -``` - -Make sure you have one of the following running: - -```bash -roslaunch custom_worlds wrs_receptionist.launch
roslaunch usb_cam usb_cam-test.launch -``` - -## Example - -Ask the package maintainer to write a `doc/EXAMPLE.md` for their package! - -## Technical Overview - -> The Meet and greet package files: - -1. Explore surroundings state -> robot wandering around (tbc) -2. Look around state -> robot moving the head and searching for people, outputting known and unknown people (tbc) -3. Meet and greet sm -> the state machine implementing preemptive states (tbc); a minimal sketch of this pattern follows after this README -4. Simple meet and greet -> the demo file demonstrating recognition - -## ROS Definitions - -### Launch Files - -This package has no launch files. - -### Messages - -This package has no messages. - -### Services - -This package has no services. - -### Actions - -This package has no actions.
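To make the "preemptive states" wording in the Technical Overview concrete, here is a minimal, self-contained smach sketch of the pattern that meet_and_greet_sm.py (deleted below) uses: a Concurrence whose child-termination callback preempts the long-running state as soon as another child finishes. All state and label names here are illustrative, not the package's actual API:

```python
#!/usr/bin/env python3
import rospy
import smach


class Wander(smach.State):
    """Long-running state; polls for preemption like ExploreSurroundingsState."""

    def __init__(self):
        smach.State.__init__(self, outcomes=["preempted"])

    def execute(self, ud):
        while not rospy.is_shutdown():
            if self.preempt_requested():
                self.service_preempt()
                return "preempted"
            rospy.sleep(0.1)
        return "preempted"


class Spotter(smach.State):
    """Short state; its completion should preempt Wander."""

    def __init__(self):
        smach.State.__init__(self, outcomes=["found"])

    def execute(self, ud):
        rospy.sleep(1.0)  # stand-in for person detection
        return "found"


# Terminate all remaining children as soon as SPOT finishes.
cc = smach.Concurrence(
    outcomes=["done"],
    default_outcome="done",
    child_termination_cb=lambda outcome_map: outcome_map["SPOT"] == "found",
)
with cc:
    smach.Concurrence.add("WANDER", Wander())
    smach.Concurrence.add("SPOT", Spotter())

if __name__ == "__main__":
    rospy.init_node("preemption_demo")
    cc.execute()
```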
diff --git a/legacy/meet_and_greet/config/map_points.yaml b/legacy/meet_and_greet/config/map_points.yaml deleted file mode 100644 index 900017367..000000000 --- a/legacy/meet_and_greet/config/map_points.yaml +++ /dev/null @@ -1,44 +0,0 @@ -point1: - position: - x: -0.696476736395 - y: -1.26116825784 - z: 0.0 - orientation: - x: 0.0 - y: 0.0 - z: -0.0435142405385 - w: 0.999052806848 - - -point2: - position: - x: 0.587315805429 - y: -0.886969920382 - z: 0.0 - orientation: - x: 0.0 - y: 0.0 - z: -0.887756799014 - w: 0.460312791268 - -door_simu: - position: - x: 6.133732680056999 - y: -0.7289013393838328 - z: 0.0 - orientation: - x: 0.0 - y: 0.0 - z: 0.028572161371893147 - w: 0.9995917324560756 - -door: - position: - x: -0.8110280734845722 - y: -1.7996335500178748 - z: 0.0 - orientation: - x: 0.0 - y: 0.0 - z: -0.9671729273733352 - w: 0.2541191227674403 \ No newline at end of file diff --git a/legacy/meet_and_greet/doc/TECHNICAL.md b/legacy/meet_and_greet/doc/TECHNICAL.md deleted file mode 100644 index 566d892e0..000000000 --- a/legacy/meet_and_greet/doc/TECHNICAL.md +++ /dev/null @@ -1,6 +0,0 @@ -> The Meet and greet package files: - -1. Explore surroundings state -> robot wandering around (tbc) -2. Look around state -> robot moving the head and searching for people, outputting known and unknown people (tbc) -3. Meet and greet sm -> the state machine implementing preemptive states (tbc) -4. Simple meet and greet -> the demo file demonstrating recognition diff --git a/legacy/meet_and_greet/doc/USAGE.md b/legacy/meet_and_greet/doc/USAGE.md deleted file mode 100644 index 157aa24cf..000000000 --- a/legacy/meet_and_greet/doc/USAGE.md +++ /dev/null @@ -1,15 +0,0 @@ -> How to run - -```bash -# NOTE: lasr_perception_server package has been removed -roslaunch lasr_perception_server perception_server.launch -rosrun meet_and_greet simple_meet_and_greet.py - -``` - -Make sure you have one of the following running: - -```bash -roslaunch custom_worlds wrs_receptionist.launch -roslaunch usb_cam usb_cam-test.launch -``` diff --git a/legacy/meet_and_greet/package.xml.excluded b/legacy/meet_and_greet/package.xml.excluded deleted file mode 100644 index 3952e47fc..000000000 --- a/legacy/meet_and_greet/package.xml.excluded +++ /dev/null @@ -1,68 +0,0 @@ -[package manifest XML lost in extraction; recoverable fields: name meet_and_greet, version 0.0.0, description "The meet_and_greet package", maintainer nicole, license TODO, buildtool_depend catkin, build and exec depends roscpp, rospy, std_msgs] diff --git a/legacy/meet_and_greet/setup.py b/legacy/meet_and_greet/setup.py deleted file mode 100644 index 70e89d64d..000000000 --- a/legacy/meet_and_greet/setup.py +++ /dev/null @@ -1,11 +0,0 @@ -#!/usr/bin/env python3 - -from distutils.core import setup -from catkin_pkg.python_setup import generate_distutils_setup - -setup_args = generate_distutils_setup( - packages=['meet_and_greet'], - package_dir={'': 'src'} -) - -setup(**setup_args) \ No newline at end of file diff --git a/legacy/meet_and_greet/src/meet_and_greet/__init__.py b/legacy/meet_and_greet/src/meet_and_greet/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/legacy/meet_and_greet/src/meet_and_greet/explore_surroundings_state.py b/legacy/meet_and_greet/src/meet_and_greet/explore_surroundings_state.py deleted file mode 100755 index 63233f7ba..000000000 --- a/legacy/meet_and_greet/src/meet_and_greet/explore_surroundings_state.py +++ /dev/null @@ -1,51 +0,0 @@ -#!/usr/bin/env python3 -# coding=utf-8 -import smach -import rospy
-import random -from tiago_controllers.helpers import get_pose_from_param, is_running -from tiago_controllers.controllers import Controllers - -class ExploreSurroundingsState(smach.State): - ''' - Wanders around and detects people - ''' - - def __init__(self, controllers): - smach.State.__init__(self, - outcomes=['finished_exploring'] - ) - - self.controllers = controllers - self.map_points = ['/point1', '/point2'] # pos on map - - def execute(self, userdata): - # wander between points - index = 0 - while not rospy.is_shutdown(): - if self.preempt_requested(): - # cancel the current base goal - # TODO: check if there is a process - self.controllers.base_controller.cancel() - rospy.logwarn(' Preempting the Explore Surroundings state') - self.service_preempt() - return 'finished_exploring' - if not is_running(self.controllers.base_controller.get_client()): - print('i am here', index) - self.controllers.base_controller.async_to_pose(get_pose_from_param(self.map_points[index])) - if index == 1: - index = 0 - else: - index = 1 - -if __name__ == '__main__': - rospy.init_node('smach_example_state_machine') - con = Controllers() - sm = smach.StateMachine(outcomes=['finished_exploring']) - with sm: - smach.StateMachine.add('EXPLORE_SURROUNDINGS', ExploreSurroundingsState(con), - transitions={'finished_exploring':'finished_exploring'},) - outcome = sm.execute() - rospy.loginfo('I have completed execution with outcome: ') - rospy.loginfo(outcome) diff --git a/legacy/meet_and_greet/src/meet_and_greet/look_around_state.py b/legacy/meet_and_greet/src/meet_and_greet/look_around_state.py deleted file mode 100755 index bebed07f8..000000000 --- a/legacy/meet_and_greet/src/meet_and_greet/look_around_state.py +++ /dev/null @@ -1,63 +0,0 @@ -#!/usr/bin/env python3 -import os -import smach -import rospy, rospkg -from sensor_msgs.msg import Image -from cv_bridge3 import CvBridge, cv2 -from lasr_perception_server.srv import DetectImage -from std_msgs.msg import Empty - -class LookAroundState(smach.State): - ''' - Looks around and tries to detect people - ''' - - def __init__(self, controllers): - smach.State.__init__(self, - outcomes=['finished_without_info', 'finished_with_known_ppl', 'finished_with_unknown_ppl']) - - self.controllers = controllers - if self.controllers.head_controller: - self.rotate_head = self.controllers.head_controller.sync_reach_to - else: - self.rotate_head = lambda *args, **kwargs : None - - self.pub = rospy.Publisher('/sm_reset', Empty, queue_size=1) - def find_faces(self): - print('in find faces') - imgs = [] - rate = rospy.Rate(3) - for i in range(2): - imgs.append(rospy.wait_for_message("/xtion/rgb/image_raw", Image)) - rate.sleep() - - det = rospy.ServiceProxy("lasr_perception_server/detect_objects_image", DetectImage) - resp = det(imgs, "coco", 0.7, 0.3, ["person"],'known_people').detected_objects - return resp - - def execute(self, userdata): - direction = 0.2 - for i in range(2): - direction *= -1 - self.rotate_head(direction, -0.2, velocities=[0.1, 0.0]) - rospy.sleep(15) - if True: - # if self.find_faces(): - print(self.find_faces()) - self.pub.publish(Empty()) - return 'finished_with_known_ppl' - # elif len(self.find_faces()) > 2: - # return 'finished_with_unknown_ppl' - - # return 'finished_without_info' - - -# if __name__ == '__main__': -# rospy.init_node('look_around_state') -# sm = smach.StateMachine(outcomes=['success']) -# with sm: -# smach.StateMachine.add('LOOK_AROUND_STATE', LookAroundState(), -# transitions={'found_people':'finished_looking_around'}) -# outcome =
sm.execute() -# rospy.loginfo('I have completed execution with outcome: ') -# rospy.loginfo(outcome) diff --git a/legacy/meet_and_greet/src/meet_and_greet/meet_and_greet_sm.py b/legacy/meet_and_greet/src/meet_and_greet/meet_and_greet_sm.py deleted file mode 100755 index 76661cb08..000000000 --- a/legacy/meet_and_greet/src/meet_and_greet/meet_and_greet_sm.py +++ /dev/null @@ -1,63 +0,0 @@ -#!/usr/bin/env python3 -# coding=utf-8 -import rospy -import smach -import sys -from smach import Concurrence -from smach_ros import IntrospectionServer -from explore_surroundings_state import ExploreSurroundingsState -from look_around_state import LookAroundState -from tiago_controllers.controllers import Controllers -import smach_ros -from std_msgs.msg import Empty - - -def out_cb(outcome_map): - if outcome_map['LOOK_AROUND'] == 'finished_with_known_ppl': - return 'finished_wandering' - if outcome_map['LOOK_AROUND'] == 'finished_with_unknown_ppl': - return 'finished_wandering' - else: - return 'wander_around' - -def child_term_cb(outcome_map): - if outcome_map['LOOK_AROUND'] == 'finished_with_known_ppl': - return True - elif outcome_map['LOOK_AROUND'] == 'finished_with_unknown_ppl': - return True - elif outcome_map['MONITORING'] == 'invalid': - return True - else: - return False -def monitoring_cb(ud, msg): - return False -class MeetAndGreetSM(smach.StateMachine): - def __init__(self): - smach.StateMachine.__init__(self, outcomes=['finished_meet_and_greet', 'aborted']) - self.controllers = Controllers() - - with self: - cc = Concurrence(outcomes=['finished_wandering','wander_around'], - default_outcome='wander_around', - output_keys=['detection_map'], - outcome_cb=out_cb, - child_termination_cb=child_term_cb - ) - with cc: - smach.Concurrence.add('EXPLORE_SURROUNDINGS', ExploreSurroundingsState(self.controllers)) - smach.Concurrence.add('LOOK_AROUND', LookAroundState(self.controllers)) - smach.Concurrence.add('MONITORING', smach_ros.MonitorState("/sm_reset", Empty, monitoring_cb)) - - smach.StateMachine.add('WANDERING', cc, - transitions={'finished_wandering': 'finished_meet_and_greet', 'wander_around':'WANDERING'}) - - - -if __name__ == "__main__": - rospy.init_node("meet_and_greet", sys.argv, anonymous=True) - sm = MeetAndGreetSM() - sis = IntrospectionServer('iserver', sm, 'SM_ROOT') - sis.start() - outcome = sm.execute() - sis.stop() - rospy.loginfo(f'I have completed execution with outcome {outcome}') diff --git a/legacy/meet_and_greet/src/meet_and_greet/simple_meet_and_greet.py b/legacy/meet_and_greet/src/meet_and_greet/simple_meet_and_greet.py deleted file mode 100755 index 3e20920d2..000000000 --- a/legacy/meet_and_greet/src/meet_and_greet/simple_meet_and_greet.py +++ /dev/null @@ -1,85 +0,0 @@ -#!/usr/bin/env python3 -import rospy -import os -import rospkg -from lasr_voice.voice import Voice -from tiago_controllers.helpers import get_pose_from_param, is_running -from sensor_msgs.msg import Image -from cv_bridge3 import CvBridge, cv2 -from lasr_perception_server.srv import DetectImage -from tiago_controllers.controllers import Controllers - -IMAGES = 1 - -class SimpleMeetGreet: - def __init__(self): - self.controllers = Controllers() - - if rospy.get_published_topics(namespace='/xtion'): - print('in xtion') - self.topic = '/xtion/rgb/image_raw' - else: - print('in usb') - self.topic = '/usb_cam/image_raw' - - - self.voice = Voice() - self.map_points = ['/point1', '/point2'] # pos on map - - def find_person(self): - imgs = [] - rate = rospy.Rate(3) - for i in range(IMAGES): - im = 
rospy.wait_for_message('/usb_cam/image_raw', Image) - imgs.append(im) - # imgs.append(rospy.wait_for_message(self.topic, Image)) - rate.sleep() - - det = rospy.ServiceProxy("lasr_perception_server/detect_objects_image", DetectImage) - resp = det(imgs, "coco", 0.7, 0.3, ["person"], 'known_people').detected_objects - print(resp) - return resp - - def handle_findings(self, detections): - names = [] - counter = 0 - if len(detections) > 0: - for human in detections: - print('human name', human.name) - if human.name == 'person': - counter = counter + 1 - else: - self.voice.sync_tts("Hi " + str(human.name)) - - print('hi, ', human.name) - if counter > 0: - # self.voice.sync_tts(' there are new people. I have not met ' + str(counter) + ' people') - if counter == 1: - # self.voice.sync_tts("there are new people. I have not met you before") - print(' there are new people. I have not met you before') - else: - # self.voice.sync_tts(' there are new people. I have not met ' + str(counter) + ' people') - print(' there are new people. I have not met ' + str(counter) + ' people') - - - - def main(self): - # pos = get_pose_from_param('/door') - # self.controllers.base_controller.sync_to_pose(pos) - for i in range(4): - resp = self.find_person() - if len(resp) > 0: - self.handle_findings(resp) - return - - # self.voice.sync_tts("Hi, I don't know anyone") - print("Hi, I don't know anyone") - print("I don't know anyone") - - - - -if __name__ == '__main__': - rospy.init_node("simple_meet_and_greet", anonymous=True) - test = SimpleMeetGreet() - test.main() diff --git a/legacy/lasr_dialogflow/src/lasr_dialogflow/__init__.py b/legacy/narrow_space_navigation/CATKIN_IGNORE similarity index 100% rename from legacy/lasr_dialogflow/src/lasr_dialogflow/__init__.py rename to legacy/narrow_space_navigation/CATKIN_IGNORE diff --git a/legacy/object_interest_tracking/.gitignore b/legacy/object_interest_tracking/.gitignore deleted file mode 100644 index dcd4e925d..000000000 --- a/legacy/object_interest_tracking/.gitignore +++ /dev/null @@ -1,272 +0,0 @@ -# Created by https://www.toptal.com/developers/gitignore/api/ros,python,venv,visualstudiocode,linux -# Edit at https://www.toptal.com/developers/gitignore?templates=ros,python,venv,visualstudiocode,linux - -### Linux ### -*~ - -# temporary files which can be created if a process still has a handle open of a deleted file -.fuse_hidden* - -# KDE directory preferences -.directory - -# Linux trash folder which might appear on any partition or disk -.Trash-* - -# .nfs files are created when an open file is removed but is still being accessed -.nfs* - -### Python ### -# Byte-compiled / optimized / DLL files -__pycache__/ -*.py[cod] -*$py.class - -# C extensions -*.so - -# Distribution / packaging -.Python -build/ -develop-eggs/ -dist/ -downloads/ -eggs/ -.eggs/ -lib/ -lib64/ -parts/ -sdist/ -var/ -wheels/ -share/python-wheels/ -*.egg-info/ -.installed.cfg -*.egg -MANIFEST - -# PyInstaller -# Usually these files are written by a python script from a template -# before PyInstaller builds the exe, so as to inject date/other infos into it.
-*.manifest -*.spec - -# Installer logs -pip-log.txt -pip-delete-this-directory.txt - -# Unit test / coverage reports -htmlcov/ -.tox/ -.nox/ -.coverage -.coverage.* -.cache -nosetests.xml -coverage.xml -*.cover -*.py,cover -.hypothesis/ -.pytest_cache/ -cover/ - -# Translations -*.mo -*.pot - -# Django stuff: -*.log -local_settings.py -db.sqlite3 -db.sqlite3-journal - -# Flask stuff: -instance/ -.webassets-cache - -# Scrapy stuff: -.scrapy - -# Sphinx documentation -docs/_build/ - -# PyBuilder -.pybuilder/ -target/ - -# Jupyter Notebook -.ipynb_checkpoints - -# IPython -profile_default/ -ipython_config.py - -# pyenv -# For a library or package, you might want to ignore these files since the code is -# intended to run in multiple environments; otherwise, check them in: -# .python-version - -# pipenv -# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. -# However, in case of collaboration, if having platform-specific dependencies or dependencies -# having no cross-platform support, pipenv may install dependencies that don't work, or not -# install all needed dependencies. -#Pipfile.lock - -# poetry -# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. -# This is especially recommended for binary packages to ensure reproducibility, and is more -# commonly ignored for libraries. -# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control -#poetry.lock - -# pdm -# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. -#pdm.lock -# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it -# in version control. -# https://pdm.fming.dev/#use-with-ide -.pdm.toml - -# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm -__pypackages__/ - -# Celery stuff -celerybeat-schedule -celerybeat.pid - -# SageMath parsed files -*.sage.py - -# Environments -.env -.venv -env/ -venv/ -ENV/ -env.bak/ -venv.bak/ - -# Spyder project settings -.spyderproject -.spyproject - -# Rope project settings -.ropeproject - -# mkdocs documentation -/site - -# mypy -.mypy_cache/ -.dmypy.json -dmypy.json - -# Pyre type checker -.pyre/ - -# pytype static type analyzer -.pytype/ - -# Cython debug symbols -cython_debug/ - -# PyCharm -# JetBrains specific template is maintained in a separate JetBrains.gitignore that can -# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore -# and can be added to the global gitignore or merged into this file. For a more nuclear -# option (not recommended) you can uncomment the following to ignore the entire idea folder. 
-#.idea/ - -### Python Patch ### -# Poetry local configuration file - https://python-poetry.org/docs/configuration/#local-configuration -poetry.toml - -# ruff -.ruff_cache/ - -# LSP config files -pyrightconfig.json - -### ROS ### -devel/ -logs/ -bin/ -msg_gen/ -srv_gen/ -msg/*Action.msg -msg/*ActionFeedback.msg -msg/*ActionGoal.msg -msg/*ActionResult.msg -msg/*Feedback.msg -msg/*Goal.msg -msg/*Result.msg -msg/_*.py -build_isolated/ -devel_isolated/ - -# Generated by dynamic reconfigure -*.cfgc -/cfg/cpp/ -/cfg/*.py - -# Ignore generated docs -*.dox -*.wikidoc - -# eclipse stuff -.project -.cproject - -# qcreator stuff -CMakeLists.txt.user - -srv/_*.py -*.pcd -*.pyc -qtcreator-* -*.user - -/planning/cfg -/planning/docs -/planning/src - - -# Emacs -.#* - -# Catkin custom files -CATKIN_IGNORE - -### venv ### -# Virtualenv -# http://iamzed.com/2009/05/07/a-primer-on-virtualenv/ -[Bb]in -[Ii]nclude -[Ll]ib -[Ll]ib64 -[Ll]ocal -[Ss]cripts -pyvenv.cfg -pip-selfcheck.json - -### VisualStudioCode ### -.vscode/* -!.vscode/settings.json -!.vscode/tasks.json -!.vscode/launch.json -!.vscode/extensions.json -!.vscode/*.code-snippets - -# Local History for Visual Studio Code -.history/ - -# Built Visual Studio Code Extensions -*.vsix - -### VisualStudioCode Patch ### -# Ignore all local history of files -.history -.ionide - -# End of https://www.toptal.com/developers/gitignore/api/ros,python,venv,visualstudiocode,linux diff --git a/legacy/object_interest_tracking/CMakeLists.txt b/legacy/object_interest_tracking/CMakeLists.txt deleted file mode 100644 index b2af86842..000000000 --- a/legacy/object_interest_tracking/CMakeLists.txt +++ /dev/null @@ -1,210 +0,0 @@ -cmake_minimum_required(VERSION 3.0.2) -project(object_interest_tracking) - -## Compile as C++11, supported in ROS Kinetic and newer -# add_compile_options(-std=c++11) - -## Find catkin macros and libraries -## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz) -## is used, also find other catkin packages -find_package(catkin REQUIRED COMPONENTS - geometry_msgs - message_generation - message_runtime - rospy - sensor_msgs - std_msgs -) - -## System dependencies are found with CMake's conventions -# find_package(Boost REQUIRED COMPONENTS system) - - -## Uncomment this if the package has a setup.py. This macro ensures -## modules and global scripts declared therein get installed -## See http://ros.org/doc/api/catkin/html/user_guide/setup_dot_py.html -# catkin_python_setup() - -################################################ -## Declare ROS messages, services and actions ## -################################################ - -## To declare and build messages, services or actions from within this -## package, follow these steps: -## * Let MSG_DEP_SET be the set of packages whose message types you use in -## your messages/services/actions (e.g. std_msgs, actionlib_msgs, ...). -## * In the file package.xml: -## * add a build_depend tag for "message_generation" -## * add a build_depend and a exec_depend tag for each package in MSG_DEP_SET -## * If MSG_DEP_SET isn't empty the following dependency has been pulled in -## but can be declared for certainty nonetheless: -## * add a exec_depend tag for "message_runtime" -## * In this file (CMakeLists.txt): -## * add "message_generation" and every package in MSG_DEP_SET to -## find_package(catkin REQUIRED COMPONENTS ...) -## * add "message_runtime" and every package in MSG_DEP_SET to -## catkin_package(CATKIN_DEPENDS ...) 
-## * uncomment the add_*_files sections below as needed -## and list every .msg/.srv/.action file to be processed -## * uncomment the generate_messages entry below -## * add every package in MSG_DEP_SET to generate_messages(DEPENDENCIES ...) - -## Generate messages in the 'msg' folder -# add_message_files( -# FILES -# Message1.msg -# Message2.msg -# ) - -## Generate services in the 'srv' folder -add_service_files( - FILES - Tdr.srv -) - -## Generate actions in the 'action' folder -# add_action_files( -# FILES -# Action1.action -# Action2.action -# ) - -## Generate added messages and services with any dependencies listed here -generate_messages( - DEPENDENCIES - std_msgs - geometry_msgs - sensor_msgs -) - -################################################ -## Declare ROS dynamic reconfigure parameters ## -################################################ - -## To declare and build dynamic reconfigure parameters within this -## package, follow these steps: -## * In the file package.xml: -## * add a build_depend and a exec_depend tag for "dynamic_reconfigure" -## * In this file (CMakeLists.txt): -## * add "dynamic_reconfigure" to -## find_package(catkin REQUIRED COMPONENTS ...) -## * uncomment the "generate_dynamic_reconfigure_options" section below -## and list every .cfg file to be processed - -## Generate dynamic reconfigure parameters in the 'cfg' folder -# generate_dynamic_reconfigure_options( -# cfg/DynReconf1.cfg -# cfg/DynReconf2.cfg -# ) - -################################### -## catkin specific configuration ## -################################### -## The catkin_package macro generates cmake config files for your package -## Declare things to be passed to dependent projects -## INCLUDE_DIRS: uncomment this if your package contains header files -## LIBRARIES: libraries you create in this project that dependent projects also need -## CATKIN_DEPENDS: catkin_packages dependent projects also need -## DEPENDS: system dependencies of this project that dependent projects also need -catkin_package( -# INCLUDE_DIRS include -# LIBRARIES engagementScore - CATKIN_DEPENDS geometry_msgs message_generation message_runtime rospy sensor_msgs std_msgs -# DEPENDS system_lib -) - -########### -## Build ## -########### - -## Specify additional locations of header files -## Your package locations should be listed before other locations -include_directories( -# include - ${catkin_INCLUDE_DIRS} -) - -## Declare a C++ library -# add_library(${PROJECT_NAME} -# src/${PROJECT_NAME}/engagementScore.cpp -# ) - -## Add cmake target dependencies of the library -## as an example, code may need to be generated before libraries -## either from message generation or dynamic reconfigure -# add_dependencies(${PROJECT_NAME} ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS}) - -## Declare a C++ executable -## With catkin_make all packages are built within a single CMake context -## The recommended prefix ensures that target names across packages don't collide -# add_executable(${PROJECT_NAME}_node src/engagementScore_node.cpp) - -## Rename C++ executable without prefix -## The above recommended prefix causes long target names, the following renames the -## target back to the shorter version for ease of user use -## e.g. 
"rosrun someones_pkg node" instead of "rosrun someones_pkg someones_pkg_node" -# set_target_properties(${PROJECT_NAME}_node PROPERTIES OUTPUT_NAME node PREFIX "") - -## Add cmake target dependencies of the executable -## same as for the library above -# add_dependencies(${PROJECT_NAME}_node ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS}) - -## Specify libraries to link a library or executable target against -# target_link_libraries(${PROJECT_NAME}_node -# ${catkin_LIBRARIES} -# ) - -############# -## Install ## -############# - -# all install targets should use catkin DESTINATION variables -# See http://ros.org/doc/api/catkin/html/adv_user_guide/variables.html - -## Mark executable scripts (Python etc.) for installation -## in contrast to setup.py, you can choose the destination -# catkin_install_python(PROGRAMS -# scripts/my_python_script -# DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} -# ) - -## Mark executables for installation -## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_executables.html -# install(TARGETS ${PROJECT_NAME}_node -# RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} -# ) - -## Mark libraries for installation -## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_libraries.html -# install(TARGETS ${PROJECT_NAME} -# ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} -# LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} -# RUNTIME DESTINATION ${CATKIN_GLOBAL_BIN_DESTINATION} -# ) - -## Mark cpp header files for installation -# install(DIRECTORY include/${PROJECT_NAME}/ -# DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION} -# FILES_MATCHING PATTERN "*.h" -# PATTERN ".svn" EXCLUDE -# ) - -## Mark other files for installation (e.g. launch and bag files, etc.) -# install(FILES -# # myfile1 -# # myfile2 -# DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION} -# ) - -############# -## Testing ## -############# - -## Add gtest based cpp test target and link libraries -# catkin_add_gtest(${PROJECT_NAME}-test test/test_engagementScore.cpp) -# if(TARGET ${PROJECT_NAME}-test) -# target_link_libraries(${PROJECT_NAME}-test ${PROJECT_NAME}) -# endif() - -## Add folders to be run by python nosetests -# catkin_add_nosetests(test) diff --git a/legacy/object_interest_tracking/README.MD b/legacy/object_interest_tracking/README.MD deleted file mode 100644 index 07c8a7258..000000000 --- a/legacy/object_interest_tracking/README.MD +++ /dev/null @@ -1,30 +0,0 @@ -## Under Development -# -This repo aims to provide a solution to who the robot should engage with in a human-like social environment in ROS. The package currently works only in Noetic. Due to mediapipe requirements of python > 3.8, the package won't run in melodic. - -The current process takes an image frame, then detect people on the frame, then separate them to apply the scoring pipeline. Currently, the service will output the frame of the person [xywh] to engage with. It will always output a person, even if the person has a low engagement score. - -# Services -## engagementScore -Input: Nothing - -Output: A list that has the frame part of the person to engage with [xywh] - -### Scoring - -The current scoring takes into account emotions, head position, and availability of distractions. It will be extended to include more criteria. - - -# Future updates (todo list) - -Add distance and the change in distance to the scoring pipeline - -Add gesture detection - -# References - -A. M. Al-Nuimi and G. J. 
Mohammed, "Face Direction Estimation based on Mediapipe Landmarks," 2021 7th International Conference on Contemporary Information Technology and Mathematics (ICCITM), Mosul, Iraq, 2021, pp. 185-190, doi: 10.1109/ICCITM53167.2021.9677878. - - -# -## Tested only in simulation, yet to be tested in a robot diff --git a/legacy/object_interest_tracking/README.md b/legacy/object_interest_tracking/README.md deleted file mode 100644 index d63ea8343..000000000 --- a/legacy/object_interest_tracking/README.md +++ /dev/null @@ -1,66 +0,0 @@ -# object_interest_tracking - -The object_interest_tracking package - -This package is maintained by: -- [yousef](mailto:yousef@todo.todo) - -## Prerequisites - -This package depends on the following ROS packages: -- catkin (buildtool) -- geometry_msgs (build) -- message_generation (build) -- rospy (build) -- sensor_msgs (build) -- std_msgs (build) -- message_runtime (build) -- geometry_msgs (exec) -- message_runtime (exec) -- rospy (exec) -- sensor_msgs (exec) -- std_msgs (exec) -- message_generation (exec) - -Ask the package maintainer to write or create a blank `doc/PREREQUISITES.md` for their package! - -## Usage - -Ask the package maintainer to write a `doc/USAGE.md` for their package! - -## Example - -Ask the package maintainer to write a `doc/EXAMPLE.md` for their package! - -## Technical Overview - -Ask the package maintainer to write a `doc/TECHNICAL.md` for their package! - -## ROS Definitions - -### Launch Files - -This package has no launch files. - -### Messages - -This package has no messages. - -### Services - -#### `Tdr` - -Request - -| Field | Type | Description | -|:-:|:-:|---| - -Response - -| Field | Type | Description | -|:-:|:-:|---| - - -### Actions - -This package has no actions. diff --git a/legacy/object_interest_tracking/models/face_landmarker.task b/legacy/object_interest_tracking/models/face_landmarker.task deleted file mode 100644 index c50c845d1..000000000 Binary files a/legacy/object_interest_tracking/models/face_landmarker.task and /dev/null differ diff --git a/legacy/object_interest_tracking/package.xml b/legacy/object_interest_tracking/package.xml deleted file mode 100644 index a50f2ad43..000000000 --- a/legacy/object_interest_tracking/package.xml +++ /dev/null @@ -1,75 +0,0 @@ - - - object_interest_tracking - 0.0.0 - The object_interest_tracking package - - - - - yousef - - - - - - TODO - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - catkin - geometry_msgs - message_generation - rospy - sensor_msgs - std_msgs - message_runtime - geometry_msgs - rospy - sensor_msgs - std_msgs - geometry_msgs - message_runtime - rospy - sensor_msgs - std_msgs - message_generation - - - - - - - - diff --git a/legacy/object_interest_tracking/setup.py b/legacy/object_interest_tracking/setup.py deleted file mode 100644 index 06ad1322b..000000000 --- a/legacy/object_interest_tracking/setup.py +++ /dev/null @@ -1,11 +0,0 @@ -#!/usr/bin/env python3 - -from distutils.core import setup -from catkin_pkg.python_setup import generate_distutils_setup - -setup_args = generate_distutils_setup( - packages=['object_interest_tracking'], - package_dir={'': 'src'} -) - -setup(**setup_args) \ No newline at end of file diff --git a/legacy/object_interest_tracking/src/__init__.py b/legacy/object_interest_tracking/src/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/legacy/object_interest_tracking/src/detect_interest_srv.py b/legacy/object_interest_tracking/src/detect_interest_srv.py deleted file mode 100755 index 
af2f7e259..000000000 --- a/legacy/object_interest_tracking/src/detect_interest_srv.py +++ /dev/null @@ -1,162 +0,0 @@ -#!/usr/bin/env python3 -import rospy -import numpy as np -import cv2 -from sensor_msgs.msg import Image, PointCloud2 -from object_interest_tracking.srv import Tdr, TdrResponse -from cv_bridge3 import CvBridge -from lasr_vision_msgs.srv import YoloDetection -from geometry_msgs.msg import Point, PointStamped -import ros_numpy as rnp -from std_msgs.msg import String -from tf_module.srv import TfTransform, TfTransformRequest -from tiago_controllers import BaseController -from math import acos -from lasr_shapely import LasrShapely - -from tiago_controllers.helpers.pose_helpers import get_pose_from_param - -from lasr_object_detection_yolo.detect_objects_v8 import detect_objects, perform_detection, debug - -def estimate_pose(pcl_msg, cv_im, detection): - tf = rospy.ServiceProxy("/tf_transform", TfTransform) - contours = np.array(detection).reshape(-1, 2) - mask = np.zeros((cv_im.shape[0], cv_im.shape[1]), np.uint8) - cv2.fillPoly(mask, pts=[contours], color=(255, 255, 255)) - indices = np.argwhere(mask) - if indices.shape[0] == 0: - return np.array([np.inf, np.inf, np.inf]) - pcl_xyz = rnp.point_cloud2.pointcloud2_to_xyz_array(pcl_msg, remove_nans=False) - - xyz_points = [] - for x, y in indices: - x, y, z = pcl_xyz[x][y] - xyz_points.append([x, y, z]) - - x, y, z = np.nanmean(xyz_points, axis=0) - centroid = PointStamped() - centroid.point = Point(x, y, z) - centroid.header = pcl_msg.header - tf_req = TfTransformRequest() - tf_req.target_frame = String("map") - tf_req.point = centroid - tf_req.source_frame = String(pcl_msg.header.frame_id) - response = tf(tf_req) - return response.target_point.point - - -def toNPArray(a): - return np.array([a.x, a.y]) - - -def orderIndices(current, previous): - if len(current) == 1 and len(previous) == 1: - return [0] - - indices = [] - if len(current) == len(previous): - for ma in range(len(current)): - diffs = list(map(lambda lo: np.linalg.norm(toNPArray(current[ma]) - toNPArray(lo)), previous)) - indices.append(diffs.index(min(diffs))) - - return indices - - -def getPoseDiff(lastRecorded, x, y): - distances = [] - for i in lastRecorded: - distances.append(np.linalg.norm(toNPArray(i) - np.array([x, y]))) - return distances - - - - -def v2(req): - cvBridge = CvBridge() - shapley = LasrShapely() - rospy.wait_for_service('/yolov8/detect') - corners = rospy.get_param("/corners_arena") - print(corners, "corners") - - objectRecognitionService = rospy.ServiceProxy('/yolov8/detect', YoloDetection) - basePose = get_pose_from_param("/phase3_lift/pose") - print(basePose, "base pose") - - # BaseController().sync_to_pose(basePose) - - headPoint = None - found = False - ANGLE_THRESHOLD = 10 - REACHING_THRESHOLDS = 2 - - while not found: - rospy.sleep(0.5) - locs = [] - for i in range(2): - img_msg = rospy.wait_for_message('/xtion/rgb/image_raw', Image) - img_depth_msg = rospy.wait_for_message('/xtion/depth_registered/points', PointCloud2) - - - currentLocs = list(filter(lambda i: i.name == "person", - objectRecognitionService(img_msg, 'yolov8n-seg.pt', 0.7, 0.3).detected_objects)) - - cm = [] - rospy.logwarn("0") - - for i2 in range(len(currentLocs)): - # Assume 2 ppl! 
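-                # For each detected person: project its segmentation mask into the point cloud (estimate_pose), transform the centroid into the map frame, and keep it only if it lies inside the arena polygon.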
-                pose = estimate_pose(img_depth_msg, cvBridge.imgmsg_to_cv2_np(img_msg), currentLocs[i2].xyseg) - if (shapley.is_point_in_polygon_2d(corners, pose.x, pose.y)): - cm.append(pose) - - locs.append(cm) - rospy.logwarn(len(locs[0])) - - print("vecs11") - - if i == 1 and len(locs[0]) > 0 and len(locs[0]) == len(locs[1]): - # swap if needed - newIndices = orderIndices(cm, locs[0]) - - if (newIndices == [0]): - locs.append(cm) - else: - locs.append([cm[i] for i in newIndices]) - - # CALC VECS - vecs = [] - for i in range(len(locs[1])): - rospy.logerr(locs[1][i]) - a2 = toNPArray(locs[1][i]) - toNPArray(locs[0][i]) - vecs.append(a2) - print("vecs") - - print(vecs) - - # CALC ANGLES - [x, y, q] = BaseController().get_current_pose() - - angles = [] - print(vecs) - # angle between each motion vector and the robot position vector (cosine formula) - for i1 in range(len(vecs)): - angles.append(acos(np.dot(vecs[i1], np.array([x, y])) / ( - np.linalg.norm(vecs[i1]) * np.linalg.norm(np.array([x, y]))))) - - distances = getPoseDiff(locs[1], x, y) - rospy.logwarn("dis ANG") - rospy.logwarn(min(angles)) - rospy.logwarn(distances[angles.index(min(angles))]) - - # FACE PERSON - if min(angles) < ANGLE_THRESHOLD and distances[angles.index(min(angles))] < REACHING_THRESHOLDS: - headPoint = locs[1][angles.index(min(angles))] # pick from the latest set of positions - found = True - - print(headPoint) - BaseController().sync_face_to(headPoint.x, headPoint.y) - return TdrResponse() - - -rospy.init_node("objectEngagementTracking") -rospy.Service('v2', Tdr, v2) -rospy.spin() \ No newline at end of file diff --git a/legacy/object_interest_tracking/src/settings.py b/legacy/object_interest_tracking/src/settings.py deleted file mode 100644 index 863e8208f..000000000 --- a/legacy/object_interest_tracking/src/settings.py +++ /dev/null @@ -1,3 +0,0 @@ -MEDIAPIPE_MODEL_PATH = "./src/engagmentScore/models/face_landmarker.task" -ANGLE_SCORE = lambda x: -1/5*x+2 -DISCOUNT_FACTOR = 0.5 \ No newline at end of file diff --git a/legacy/object_interest_tracking/srv/Tdr.srv b/legacy/object_interest_tracking/srv/Tdr.srv deleted file mode 100644 index 73b314ff7..000000000 --- a/legacy/object_interest_tracking/srv/Tdr.srv +++ /dev/null @@ -1 +0,0 @@ ---- \ No newline at end of file diff --git a/legacy/lasr_navigate_to_known_person/src/lasr_navigate_to_known_person/__init__.py b/legacy/pcl_segmentation/CATKIN_IGNORE similarity index 100% rename from legacy/lasr_navigate_to_known_person/src/lasr_navigate_to_known_person/__init__.py rename to legacy/pcl_segmentation/CATKIN_IGNORE diff --git a/legacy/pcl_segmentation/package.xml b/legacy/pcl_segmentation/package.xml index b19b36a36..79753ad87 100644 --- a/legacy/pcl_segmentation/package.xml +++ b/legacy/pcl_segmentation/package.xml @@ -7,7 +7,7 @@ - jared + Jared Swift diff --git a/legacy/people_detection/__init__.py b/legacy/people_detection/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/legacy/people_detection/create_dataset/CMakeLists.txt b/legacy/people_detection/create_dataset/CMakeLists.txt deleted file mode 100644 index 3689a14ab..000000000 --- a/legacy/people_detection/create_dataset/CMakeLists.txt +++ /dev/null @@ -1,205 +0,0 @@ -cmake_minimum_required(VERSION 3.0.2) -project(create_dataset) - -## Compile as C++11, supported in ROS Kinetic and newer -# add_compile_options(-std=c++11) - -## Find catkin macros and libraries -## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz) -## is used, also find other catkin packages -find_package(catkin REQUIRED COMPONENTS - rospy - std_msgs -) - -## System dependencies are found with CMake's conventions -# find_package(Boost REQUIRED
COMPONENTS system) - - -## Uncomment this if the package has a setup.py. This macro ensures -## modules and global scripts declared therein get installed -## See http://ros.org/doc/api/catkin/html/user_guide/setup_dot_py.html -catkin_python_setup() - -################################################ -## Declare ROS messages, services and actions ## -################################################ - -## To declare and build messages, services or actions from within this -## package, follow these steps: -## * Let MSG_DEP_SET be the set of packages whose message types you use in -## your messages/services/actions (e.g. std_msgs, actionlib_msgs, ...). -## * In the file package.xml: -## * add a build_depend tag for "message_generation" -## * add a build_depend and a exec_depend tag for each package in MSG_DEP_SET -## * If MSG_DEP_SET isn't empty the following dependency has been pulled in -## but can be declared for certainty nonetheless: -## * add a exec_depend tag for "message_runtime" -## * In this file (CMakeLists.txt): -## * add "message_generation" and every package in MSG_DEP_SET to -## find_package(catkin REQUIRED COMPONENTS ...) -## * add "message_runtime" and every package in MSG_DEP_SET to -## catkin_package(CATKIN_DEPENDS ...) -## * uncomment the add_*_files sections below as needed -## and list every .msg/.srv/.action file to be processed -## * uncomment the generate_messages entry below -## * add every package in MSG_DEP_SET to generate_messages(DEPENDENCIES ...) - -## Generate messages in the 'msg' folder -# add_message_files( -# FILES -# Message1.msg -# Message2.msg -# ) - -## Generate services in the 'srv' folder -# add_service_files( -# FILES -# Service1.srv -# Service2.srv -# ) - -## Generate actions in the 'action' folder -# add_action_files( -# FILES -# Action1.action -# Action2.action -# ) - -## Generate added messages and services with any dependencies listed here -# generate_messages( -# DEPENDENCIES -# std_msgs -# ) - -################################################ -## Declare ROS dynamic reconfigure parameters ## -################################################ - -## To declare and build dynamic reconfigure parameters within this -## package, follow these steps: -## * In the file package.xml: -## * add a build_depend and a exec_depend tag for "dynamic_reconfigure" -## * In this file (CMakeLists.txt): -## * add "dynamic_reconfigure" to -## find_package(catkin REQUIRED COMPONENTS ...) 
-## * uncomment the "generate_dynamic_reconfigure_options" section below -## and list every .cfg file to be processed - -## Generate dynamic reconfigure parameters in the 'cfg' folder -# generate_dynamic_reconfigure_options( -# cfg/DynReconf1.cfg -# cfg/DynReconf2.cfg -# ) - -################################### -## catkin specific configuration ## -################################### -## The catkin_package macro generates cmake config files for your package -## Declare things to be passed to dependent projects -## INCLUDE_DIRS: uncomment this if your package contains header files -## LIBRARIES: libraries you create in this project that dependent projects also need -## CATKIN_DEPENDS: catkin_packages dependent projects also need -## DEPENDS: system dependencies of this project that dependent projects also need -catkin_package( -# INCLUDE_DIRS include -# LIBRARIES create_dataset -# CATKIN_DEPENDS rospy std_msgs -# DEPENDS system_lib -) - -########### -## Build ## -########### - -## Specify additional locations of header files -## Your package locations should be listed before other locations -include_directories( -# include - ${catkin_INCLUDE_DIRS} -) - -## Declare a C++ library -# add_library(${PROJECT_NAME} -# src/${PROJECT_NAME}/create_dataset.cpp -# ) - -## Add cmake target dependencies of the library -## as an example, code may need to be generated before libraries -## either from message generation or dynamic reconfigure -# add_dependencies(${PROJECT_NAME} ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS}) - -## Declare a C++ executable -## With catkin_make all packages are built within a single CMake context -## The recommended prefix ensures that target names across packages don't collide -# add_executable(${PROJECT_NAME}_node src/create_dataset_node.cpp) - -## Rename C++ executable without prefix -## The above recommended prefix causes long target names, the following renames the -## target back to the shorter version for ease of user use -## e.g. "rosrun someones_pkg node" instead of "rosrun someones_pkg someones_pkg_node" -# set_target_properties(${PROJECT_NAME}_node PROPERTIES OUTPUT_NAME node PREFIX "") - -## Add cmake target dependencies of the executable -## same as for the library above -# add_dependencies(${PROJECT_NAME}_node ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS}) - -## Specify libraries to link a library or executable target against -# target_link_libraries(${PROJECT_NAME}_node -# ${catkin_LIBRARIES} -# ) - -############# -## Install ## -############# - -# all install targets should use catkin DESTINATION variables -# See http://ros.org/doc/api/catkin/html/adv_user_guide/variables.html - -## Mark executable scripts (Python etc.) 
for installation -## in contrast to setup.py, you can choose the destination -# catkin_install_python(PROGRAMS -# scripts/my_python_script -# DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} -# ) - -## Mark executables for installation -## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_executables.html -# install(TARGETS ${PROJECT_NAME}_node -# RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} -# ) - -## Mark libraries for installation -## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_libraries.html -# install(TARGETS ${PROJECT_NAME} -# ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} -# LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} -# RUNTIME DESTINATION ${CATKIN_GLOBAL_BIN_DESTINATION} -# ) - -## Mark cpp header files for installation -# install(DIRECTORY include/${PROJECT_NAME}/ -# DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION} -# FILES_MATCHING PATTERN "*.h" -# PATTERN ".svn" EXCLUDE -# ) - -## Mark other files for installation (e.g. launch and bag files, etc.) -# install(FILES -# # myfile1 -# # myfile2 -# DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION} -# ) - -############# -## Testing ## -############# - -## Add gtest based cpp test target and link libraries -# catkin_add_gtest(${PROJECT_NAME}-test test/test_create_dataset.cpp) -# if(TARGET ${PROJECT_NAME}-test) -# target_link_libraries(${PROJECT_NAME}-test ${PROJECT_NAME}) -# endif() - -## Add folders to be run by python nosetests -# catkin_add_nosetests(test) diff --git a/legacy/people_detection/create_dataset/README.md b/legacy/people_detection/create_dataset/README.md deleted file mode 100644 index 5b2eb3449..000000000 --- a/legacy/people_detection/create_dataset/README.md +++ /dev/null @@ -1,47 +0,0 @@ -# create_dataset - -The create_dataset package - -This package is maintained by: -- [nicole](mailto:nicole@todo.todo) - -## Prerequisites - -This package depends on the following ROS packages: -- catkin (buildtool) -- rospy (build) -- std_msgs (build) -- rospy (exec) -- std_msgs (exec) - -Ask the package maintainer to write or create a blank `doc/PREREQUISITES.md` for their package! - -## Usage - -Ask the package maintainer to write a `doc/USAGE.md` for their package! - -## Example - -Ask the package maintainer to write a `doc/EXAMPLE.md` for their package! - -## Technical Overview - -Ask the package maintainer to write a `doc/TECHNICAL.md` for their package! - -## ROS Definitions - -### Launch Files - -This package has no launch files. - -### Messages - -This package has no messages. - -### Services - -This package has no services. - -### Actions - -This package has no actions. 
diff --git a/legacy/people_detection/create_dataset/package.xml b/legacy/people_detection/create_dataset/package.xml deleted file mode 100644 index eeea94592..000000000 --- a/legacy/people_detection/create_dataset/package.xml +++ /dev/null @@ -1,65 +0,0 @@ - - - create_dataset - 0.0.0 - The create_dataset package - - - - - nicole - - - - - - TODO - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - catkin - rospy - std_msgs - rospy - std_msgs - rospy - std_msgs - - - - - - - - diff --git a/legacy/people_detection/create_dataset/setup.py b/legacy/people_detection/create_dataset/setup.py deleted file mode 100644 index d18c5fb2c..000000000 --- a/legacy/people_detection/create_dataset/setup.py +++ /dev/null @@ -1,11 +0,0 @@ -#!/usr/bin/env python3 - -from distutils.core import setup -from catkin_pkg.python_setup import generate_distutils_setup - -setup_args = generate_distutils_setup( - packages=['create_dataset'], - package_dir={'': 'src'} -) - -setup(**setup_args) \ No newline at end of file diff --git a/legacy/people_detection/create_dataset/src/create_dataset/__init__.py b/legacy/people_detection/create_dataset/src/create_dataset/__init__.py deleted file mode 100644 index 9d80ae3aa..000000000 --- a/legacy/people_detection/create_dataset/src/create_dataset/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -from .create_dataset_state import CreateDatasetState -from .talk_create_dataset_state import TalkCreateDatasetState \ No newline at end of file diff --git a/legacy/people_detection/create_dataset/src/create_dataset/create_dataset_state.py b/legacy/people_detection/create_dataset/src/create_dataset/create_dataset_state.py deleted file mode 100755 index 198d6c71a..000000000 --- a/legacy/people_detection/create_dataset/src/create_dataset/create_dataset_state.py +++ /dev/null @@ -1,81 +0,0 @@ -#!/usr/bin/env python3 -# coding=utf-8 -import os -import shutil -import smach -import rospy -from sensor_msgs.msg import Image -import rospkg -from cv_bridge3 import CvBridge -from cv_bridge3 import cv2 - -import random -import string - - -IMAGE_NUMBER = 10 -SLEEP_TIME = 0.8 -MAX_DURATION = IMAGE_NUMBER*SLEEP_TIME + 2 - -class CreateDatasetState(smach.State): - ''' - Creates a dataset of a person and trains a model to redetect that individual. 
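-    Grabs IMAGE_NUMBER frames from the camera topic (one every SLEEP_TIME seconds) and writes them into a fresh per-person folder under the package's dataset directory.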
-        To be able to train, the dataset must contain at least 2 people - ''' - def __init__(self): - smach.State.__init__(self, - outcomes=['finished_scan'], - output_keys=['dataset_path'] - ) - self.name = "" - self.path = "" - self.dir_path = self.path = os.path.join(rospkg.RosPack().get_path('create_dataset'), 'dataset') - self.bridge = CvBridge() - self.images_taken = 0 - self.semaphore = False - - # Choose topic - if rospy.get_published_topics(namespace='/xtion'): - self.topic = '/xtion/rgb/image_raw' - else: - self.topic = '/usb_cam/image_raw' - - self.sub = rospy.Subscriber(self.topic, Image, self.img_callback) - - def img_callback(self, msg): - if self.semaphore: - if rospy.Time.now().to_sec() - self.last_time.to_sec() >= SLEEP_TIME and self.images_taken < IMAGE_NUMBER: - cv_image = self.bridge.imgmsg_to_cv2_np(msg) - cv2.imwrite(os.path.join(self.path, f'{self.images_taken}.jpg'), cv_image) - self.images_taken += 1 - print("*" *30," - IMAGE IS TAKEN ", self.images_taken) - self.last_time = rospy.Time.now() - elif self.images_taken >= IMAGE_NUMBER or rospy.Time.now().to_sec() - self.start_time.to_sec() > MAX_DURATION: - self.semaphore = False - - def execute(self, userdata): - rand = ''.join(random.choice(string.ascii_lowercase) for _ in range(7)) - userdata.dataset_path = self.path = os.path.join(rospkg.RosPack().get_path('create_dataset'), 'dataset', rand) - - if os.path.isdir(self.path): - shutil.rmtree(self.path) - os.mkdir(self.path) - - self.semaphore = True - self.start_time = self.last_time = rospy.Time.now() - while self.semaphore: - rospy.sleep(0.1) # avoid a busy-wait while the callback captures images - self.images_taken = 0 - return 'finished_scan' - - -if __name__ == '__main__': - rospy.init_node('smach_example_state_machine') - sm = smach.StateMachine(outcomes=['success']) - with sm: - smach.StateMachine.add('CREATE_DATASET', CreateDatasetState(), - transitions={'finished_scan':'success'}, - remapping={'dataset_path':'dataset_path'}) - outcome = sm.execute() - rospy.loginfo('I have completed execution with outcome: ') - rospy.loginfo(outcome) \ No newline at end of file diff --git a/legacy/people_detection/create_dataset/src/create_dataset/talk_create_dataset_state.py b/legacy/people_detection/create_dataset/src/create_dataset/talk_create_dataset_state.py deleted file mode 100755 index c0b3cbf4c..000000000 --- a/legacy/people_detection/create_dataset/src/create_dataset/talk_create_dataset_state.py +++ /dev/null @@ -1,55 +0,0 @@ -#!/usr/bin/env python3 -# coding=utf-8 -import rospy -import smach -import random -# from models import Person - -SLEEP_TIME = 5 - -class Person: - """ A person class that saves all the information about a person that the robot meets """ - - def __init__(self, name, fav_drink): - self.name = name - self.fav_drink = fav_drink - - - def get_fav_drink(self): - return self.fav_drink - - def __str__(self): - return ( - f"name : {self.name}\n" - f"fav drink : {self.fav_drink}\n" - ) -class TalkCreateDatasetState(smach.State): - def __init__(self): - smach.State.__init__(self, outcomes=['finished_collecting'], output_keys=['current_person']) - print("start") - self.random_names = ['belly', 'mimi', 'teo', 'pipi'] - self.random_drinks = ['vodka', 'cola', 'pepsi'] - - def execute(self, userdata): - - name = random.choice(self.random_names) - print("What is your name?", name) - - rospy.sleep(SLEEP_TIME) - - drink = random.choice(self.random_drinks) - print("What's your favourite drink?", drink) - - rospy.sleep(SLEEP_TIME) - - print(f"that's boring, I promise you will like {drink}") - userdata.current_person = Person(name,
drink) - # userdata.current_guest = Guest(name, drink) - return 'finished_collecting' - # self.dialog_client.send_goal_and_wait(DialogGoal('receptionist')) - # name = rospy.get_param("/guest/name", "unknown") - # if name == "unknown": - # name = None - # # drink = rospy.get_param("/guest/drink", "unknown") - # # if drink == "unknown": - # drink = None \ No newline at end of file diff --git a/legacy/people_detection/recognise_people/CMakeLists.txt b/legacy/people_detection/recognise_people/CMakeLists.txt deleted file mode 100644 index 0eaf4e41a..000000000 --- a/legacy/people_detection/recognise_people/CMakeLists.txt +++ /dev/null @@ -1,208 +0,0 @@ -cmake_minimum_required(VERSION 3.0.2) -project(recognise_people) - -## Compile as C++11, supported in ROS Kinetic and newer -# add_compile_options(-std=c++11) - -## Find catkin macros and libraries -## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz) -## is used, also find other catkin packages -find_package(catkin REQUIRED COMPONENTS - rospy - std_msgs - sensor_msgs - message_runtime - message_generation - lasr_vision_msgs -) - -## System dependencies are found with CMake's conventions -# find_package(Boost REQUIRED COMPONENTS system) - -## Uncomment this if the package has a setup.py. This macro ensures -## modules and global scripts declared therein get installed -## See http://ros.org/doc/api/catkin/html/user_guide/setup_dot_py.html -catkin_python_setup() - -################################################ -## Declare ROS messages, services and actions ## -################################################ - -## To declare and build messages, services or actions from within this -## package, follow these steps: -## * Let MSG_DEP_SET be the set of packages whose message types you use in -## your messages/services/actions (e.g. std_msgs, actionlib_msgs, ...). -## * In the file package.xml: -## * add a build_depend tag for "message_generation" -## * add a build_depend and a exec_depend tag for each package in MSG_DEP_SET -## * If MSG_DEP_SET isn't empty the following dependency has been pulled in -## but can be declared for certainty nonetheless: -## * add a exec_depend tag for "message_runtime" -## * In this file (CMakeLists.txt): -## * add "message_generation" and every package in MSG_DEP_SET to -## find_package(catkin REQUIRED COMPONENTS ...) -## * add "message_runtime" and every package in MSG_DEP_SET to -## catkin_package(CATKIN_DEPENDS ...) -## * uncomment the add_*_files sections below as needed -## and list every .msg/.srv/.action file to be processed -## * uncomment the generate_messages entry below -## * add every package in MSG_DEP_SET to generate_messages(DEPENDENCIES ...) 
- -## Generate messages in the 'msg' folder -# add_message_files( -# FILES -# Detection.msg -# ) - -## Generate services in the 'srv' folder -add_service_files( - FILES - RecognisePeople.srv -) - -## Generate actions in the 'action' folder -# add_action_files( -# FILES -# Action1.action -# Action2.action -# ) - -## Generate added messages and services with any dependencies listed here -generate_messages( - DEPENDENCIES - std_msgs - sensor_msgs - lasr_vision_msgs -) - -################################################ -## Declare ROS dynamic reconfigure parameters ## -################################################ - -## To declare and build dynamic reconfigure parameters within this -## package, follow these steps: -## * In the file package.xml: -## * add a build_depend and a exec_depend tag for "dynamic_reconfigure" -## * In this file (CMakeLists.txt): -## * add "dynamic_reconfigure" to -## find_package(catkin REQUIRED COMPONENTS ...) -## * uncomment the "generate_dynamic_reconfigure_options" section below -## and list every .cfg file to be processed - -## Generate dynamic reconfigure parameters in the 'cfg' folder -# generate_dynamic_reconfigure_options( -# cfg/DynReconf1.cfg -# cfg/DynReconf2.cfg -# ) - -################################### -## catkin specific configuration ## -################################### -## The catkin_package macro generates cmake config files for your package -## Declare things to be passed to dependent projects -## INCLUDE_DIRS: uncomment this if your package contains header files -## LIBRARIES: libraries you create in this project that dependent projects also need -## CATKIN_DEPENDS: catkin_packages dependent projects also need -## DEPENDS: system dependencies of this project that dependent projects also need -catkin_package( -# INCLUDE_DIRS include -# LIBRARIES lasr_object_detection_yolo -# CATKIN_DEPENDS rospy std_msgs -# DEPENDS system_lib -) - -########### -## Build ## -########### - -## Specify additional locations of header files -## Your package locations should be listed before other locations -include_directories( -# include - ${catkin_INCLUDE_DIRS} -) - -## Declare a C++ library -# add_library(${PROJECT_NAME} -# src/${PROJECT_NAME}/lasr_object_detection_yolo.cpp -# ) - -## Add cmake target dependencies of the library -## as an example, code may need to be generated before libraries -## either from message generation or dynamic reconfigure -# add_dependencies(${PROJECT_NAME} ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS}) - -## Declare a C++ executable -## With catkin_make all packages are built within a single CMake context -## The recommended prefix ensures that target names across packages don't collide -# add_executable(${PROJECT_NAME}_node src/lasr_object_detection_yolo_node.cpp) - -## Rename C++ executable without prefix -## The above recommended prefix causes long target names, the following renames the -## target back to the shorter version for ease of user use -## e.g. 
"rosrun someones_pkg node" instead of "rosrun someones_pkg someones_pkg_node" -# set_target_properties(${PROJECT_NAME}_node PROPERTIES OUTPUT_NAME node PREFIX "") - -## Add cmake target dependencies of the executable -## same as for the library above -# add_dependencies(${PROJECT_NAME}_node ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS}) - -## Specify libraries to link a library or executable target against -# target_link_libraries(${PROJECT_NAME}_node -# ${catkin_LIBRARIES} -# ) - -############# -## Install ## -############# - -# all install targets should use catkin DESTINATION variables -# See http://ros.org/doc/api/catkin/html/adv_user_guide/variables.html - -## Mark executable scripts (Python etc.) for installation -## in contrast to setup.py, you can choose the destination -# catkin_install_python(PROGRAMS -# scripts/my_python_script -# DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} -# ) - -## Mark executables for installation -## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_executables.html -# install(TARGETS ${PROJECT_NAME}_node -# RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} -# ) - -## Mark libraries for installation -## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_libraries.html -# install(TARGETS ${PROJECT_NAME} -# ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} -# LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} -# RUNTIME DESTINATION ${CATKIN_GLOBAL_BIN_DESTINATION} -# ) - -## Mark cpp header files for installation -# install(DIRECTORY include/${PROJECT_NAME}/ -# DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION} -# FILES_MATCHING PATTERN "*.h" -# PATTERN ".svn" EXCLUDE -# ) - -## Mark other files for installation (e.g. launch and bag files, etc.) -# install(FILES -# # myfile1 -# # myfile2 -# DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION} -# ) - -############# -## Testing ## -############# - -## Add gtest based cpp test target and link libraries -# catkin_add_gtest(${PROJECT_NAME}-test test/test_lasr_object_detection_yolo.cpp) -# if(TARGET ${PROJECT_NAME}-test) -# target_link_libraries(${PROJECT_NAME}-test ${PROJECT_NAME}) -# endif() - -## Add folders to be run by python nosetests -# catkin_add_nosetests(test) diff --git a/legacy/people_detection/recognise_people/README.md b/legacy/people_detection/recognise_people/README.md deleted file mode 100644 index 4351c3394..000000000 --- a/legacy/people_detection/recognise_people/README.md +++ /dev/null @@ -1,62 +0,0 @@ -# recognise_people - -The recognise_people package - -This package is maintained by: -- [nicole](mailto:nicole@todo.todo) - -## Prerequisites - -This package depends on the following ROS packages: -- catkin (buildtool) -- rospy (build) -- std_msgs (build) -- rospy (exec) -- std_msgs (exec) -- lasr_vision_msgs - -Ask the package maintainer to write or create a blank `doc/PREREQUISITES.md` for their package! - -## Usage - -Ask the package maintainer to write a `doc/USAGE.md` for their package! - -## Example - -Ask the package maintainer to write a `doc/EXAMPLE.md` for their package! - -## Technical Overview - -Ask the package maintainer to write a `doc/TECHNICAL.md` for their package! - -## ROS Definitions - -### Launch Files - -This package has no launch files. - -### Messages - -This package has no messages. 
- -### Services - -#### `RecognisePeople` - -Request - -| Field | Type | Description | -|:-:|:-:|---| -| detected_objects_yolo | lasr_vision_msgs/Detection[] | | -| detected_objects_opencv | lasr_vision_msgs/Detection[] | | - -Response - -| Field | Type | Description | -|:-:|:-:|---| -| detected_objects | lasr_vision_msgs/Detection[] | | - - -### Actions - -This package has no actions. diff --git a/legacy/people_detection/recognise_people/package.xml b/legacy/people_detection/recognise_people/package.xml deleted file mode 100644 index 90bdd6695..000000000 --- a/legacy/people_detection/recognise_people/package.xml +++ /dev/null @@ -1,66 +0,0 @@ - - - recognise_people - 0.0.0 - The recognise_people package - - - - - nicole - - - - - - TODO - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - catkin - rospy - std_msgs - rospy - std_msgs - rospy - std_msgs - lasr_vision_msgs - - - - - - - - \ No newline at end of file diff --git a/legacy/people_detection/recognise_people/setup.py b/legacy/people_detection/recognise_people/setup.py deleted file mode 100644 index aca85b46a..000000000 --- a/legacy/people_detection/recognise_people/setup.py +++ /dev/null @@ -1,11 +0,0 @@ -#!/usr/bin/env python3 - -from distutils.core import setup -from catkin_pkg.python_setup import generate_distutils_setup - -setup_args = generate_distutils_setup( - packages=['recognise_people'], - package_dir={'': 'src'} -) - -setup(**setup_args) \ No newline at end of file diff --git a/legacy/people_detection/recognise_people/src/recognise_people/recognise_people_srv.py b/legacy/people_detection/recognise_people/src/recognise_people/recognise_people_srv.py deleted file mode 100755 index c4a48c862..000000000 --- a/legacy/people_detection/recognise_people/src/recognise_people/recognise_people_srv.py +++ /dev/null @@ -1,117 +0,0 @@ -#!/usr/bin/env python3 - -from recognise_people.srv import RecognisePeople, RecognisePeopleResponse -import rospy -from cv_bridge3 import CvBridge -from lasr_vision_msgs.msg import Detection - -from cv_bridge3 import cv2 -import random, os, rospkg - -class RecognisePeopleServer(): - """ - A server for recognising known people using YOLO and OpenCV - """ - - def __init__(self): - - self.face_detections = [] - self.yolo_detections = [] - self.bridge = CvBridge() - - - # TODO: get known people from rosparam or another function - - def recogniser(self, req): - response = RecognisePeopleResponse() - - self.yolo_detections = req.detected_objects_yolo - self.face_detections = req.detected_objects_opencv - detection_map = {} - # print(self.face_detections[0].name) - # print('-'*40) - # print(self.yolo_detections[0].name) - i = 0 - if len(self.yolo_detections) < 1 and len(self.face_detections) < 1: - print('empty resp') - return response - - # get bb of person using face bb and body bb - # maps name -> face(name, confidence, xywh) and yolo(name, conf, xywh) - for face in self.face_detections: - for yolo in self.yolo_detections: - x1, y1, x2, y2 = face.xywh - print(yolo, 'the yoloooo ') - - head_x = (x1 + x2) / 2 - body_x = (yolo.xywh[0] + yolo.xywh[2]) / 2 # center of the body in the x axis - # self.image_show(yolo.name, yolo.confidence,yolo.xywh, i) - i = i+1 - - if abs(head_x - body_x) < 20: - if not face.name in detection_map.keys(): - print('detection map', face, yolo) - detection_map[face.name] = (face, yolo) - print("yolo detection here, bb overlapped.") - break - # if newer detection has bigger confidence - if yolo.confidence > detection_map[face.name][1].confidence: -
detection_map[face.name] = (face, yolo) - else: - print(face.name, face.xywh, yolo.xywh) - - print('-'* 40) - print(detection_map, 'detection map\n') - - if len(self.face_detections) > len(self.yolo_detections) and len(self.face_detections) > 0: - for face in self.face_detections: - # Append detection - response.detected_objects.append( - Detection( - name=face.name, - confidence=face.confidence, - xywh=face.xywh - ) - ) - print('the face recognitons are', face) - elif len(self.face_detections) < 1 and len(self.yolo_detections) > 0: - for person in self.yolo_detections: - # Append detection. - response.detected_objects.append( - Detection( - name=person.name, - confidence=person.confidence, - xywh=person.xywh - ) - ) - else: - response = [] - - print(response) - return response - - def image_show(self, name, proba, dim, i): - x1, y1, x2, y2 = dim - path_output = os.path.join(rospkg.RosPack().get_path('face_detection'), "output") - image = cv2.imread(path_output + "/images/random.jpg",0) - # draw the bounding box of the face along with the associated - # probability - text = "{}: {:.2f}%".format(name, proba * 100) - y = y1 - 10 if y1 - 10 > 10 else y1 + 10 - cv2.rectangle(image, (x1, y1), (x2+x1, y2+y1), - (0, 0, 255), 2) - cv2.putText(image, text, (x1, y), - cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 0, 255), 2) - # show the output image - - bridge = CvBridge() - # cv_image = bridge.imgmsg_to_cv2(image, desired_encoding='passthrough') - cv2.imwrite(path_output + "/images/random" + str(i+1)+".jpg", image) - - -if __name__ == "__main__": - rospy.init_node("recognise_people_server") - server = RecognisePeopleServer() - service = rospy.Service('recognise_people_server', RecognisePeople, server.recogniser) - rospy.loginfo("Recognise People Service initialised") - rospy.spin() diff --git a/legacy/people_detection/recognise_people/src/recognise_people/recognise_state.py b/legacy/people_detection/recognise_people/src/recognise_people/recognise_state.py deleted file mode 100755 index 299bb952f..000000000 --- a/legacy/people_detection/recognise_people/src/recognise_people/recognise_state.py +++ /dev/null @@ -1,135 +0,0 @@ -#!/usr/bin/env python3 -# coding=utf-8 -import os -import shutil -import rospy -from sensor_msgs.msg import Image, PointCloud2 -import rospkg -import sys -from face_detection.recognize import recognize -from face_detection.train_model import train -from cv_bridge3 import CvBridge -from cv_bridge3 import cv2 -import smach -import itertools - -from robocup_receptionist.utils.servers_clients import detect_person -from robocup_receptionist.helpers import get_percentage_overlap, get_corners - -from face_detection.srv import FaceDetection, FaceDetectionPCL - -''' - Redetects people in the robot's view using a trained model. 
-''' -## TODO: refactor - -class RedetectState(smach.State): - def __init__(self, head_controller, torso_controller): - smach.State.__init__(self, input_keys=["guest_list"], output_keys=['introduction_combinations', "guest_list"], - outcomes=['people_recognized', 'no_people_recognized']) - self.bridge = CvBridge() - self.head_controller = head_controller - self.torso_controller = torso_controller - - if self.head_controller: - self.rotate_head = self.head_controller.sync_reach_to - else: - self.rotate_head = lambda *args, **kwargs: None - - # Use PointCloud2 - if rospy.get_published_topics(namespace='/xtion'): - rospy.wait_for_service("face_detection_pcl") - self.face_detection = rospy.ServiceProxy("face_detection_pcl", FaceDetectionPCL) - self.topic = '/xtion/depth_registered/points' - self.dtype = PointCloud2 - else: - # Use Image - rospy.wait_for_service("people_detection") - self.face_detection = rospy.ServiceProxy("people_detection", FaceDetection) - self.topic = '/usb_cam/image_raw' - self.dtype = Image - - def execute(self, userdata): - print(userdata.guest_list) - direction = 0.2 - - face_detections = [] - - yolo_detections = [] - - detection_map = {} - - self.torso_controller.sync_raise(0.4) - - for _ in range(4): - direction *= -1 - self.rotate_head(direction, -0.2, velocities=[0.1, 0.0]) - - pcl_msg = rospy.wait_for_message("/xtion/depth_registered/points", PointCloud2) - face_detection_resp = self.face_detection(pcl_msg) - yolo_detections.extend(detect_person()) - - for detection in face_detection_resp.detections: - try: - n = [d.name for d in face_detections].index(detection.name) - if detection.confidence > face_detections[n].confidence: - face_detections[n] = detection - except ValueError: - face_detections.append(detection) - - # get bb of person using face bb and body bb - for face in face_detections: - for detection in yolo_detections: - x1, y1, x2, y2 = face.bb - - head_x = (x1 + x2) / 2 # center of head in x-axis - body_x = (detection.xywh[0] + detection.xywh[2]) / 2 # center of body in x-axis - - if abs(head_x - body_x) < 20: - if not face.name in detection_map.keys(): - detection_map[face.name] = (face, detection) - print("yolo detection here, bb overlapped.") - break - if detection.confidence > detection_map[face.name][1].confidence: - detection_map[face.name] = (face, detection) - else: - print(face.name, face.bb, detection.xywh) - - intersect = [] - if len(detection_map.keys()): - for face_detection, yolo_detection in detection_map.values(): - print(f"Checking {face_detection.name}") - try: - n = [g.name for g in userdata.guest_list].index(face_detection.name) - userdata.guest_list[n].last_known_pose = yolo_detection.centroid - intersect.append(userdata.guest_list[n]) - except ValueError: - continue - print(intersect) - - # Create list of combinations of pairs of guests. - comb = list(itertools.combinations([g for g in intersect], 2)) - - # Sort the combinations. - comb = sorted(comb, key=lambda element: (element[0].name, element[1].name)) - - # Insert the reverse of each pair, ensuring the sorting critera is maintained. 
- comb = [a for pair in [(c, c[::-1]) for c in comb] for a in pair] - - print(comb) - userdata.introduction_combinations = comb - return 'people_recognized' - else: - return 'no_people_recognized' - - -if __name__ == '__main__': - rospy.init_node('smach_example_state_machine') - sm = smach.StateMachine(outcomes=['success']) - with sm: - smach.StateMachine.add('REDETECT', RedetectState(), - transitions={'finished_redetection': 'success'}, - remapping={'out_redetected_people': 'redetected_people'}) - outcome = sm.execute() - rospy.loginfo('I have completed execution with outcome: ') - rospy.loginfo(outcome) \ No newline at end of file diff --git a/legacy/people_detection/recognise_people/srv/RecognisePeople.srv b/legacy/people_detection/recognise_people/srv/RecognisePeople.srv deleted file mode 100644 index ec43502a4..000000000 --- a/legacy/people_detection/recognise_people/srv/RecognisePeople.srv +++ /dev/null @@ -1,4 +0,0 @@ -lasr_vision_msgs/Detection[] detected_objects_yolo -lasr_vision_msgs/Detection[] detected_objects_opencv ---- -lasr_vision_msgs/Detection[] detected_objects diff --git a/legacy/people_detection/train_dataset_model/CMakeLists.txt b/legacy/people_detection/train_dataset_model/CMakeLists.txt deleted file mode 100644 index 5088a7e79..000000000 --- a/legacy/people_detection/train_dataset_model/CMakeLists.txt +++ /dev/null @@ -1,205 +0,0 @@ -cmake_minimum_required(VERSION 3.0.2) -project(train_dataset_model) - -## Compile as C++11, supported in ROS Kinetic and newer -# add_compile_options(-std=c++11) - -## Find catkin macros and libraries -## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz) -## is used, also find other catkin packages -find_package(catkin REQUIRED COMPONENTS - rospy - std_msgs -) - -## System dependencies are found with CMake's conventions -# find_package(Boost REQUIRED COMPONENTS system) - - -## Uncomment this if the package has a setup.py. This macro ensures -## modules and global scripts declared therein get installed -## See http://ros.org/doc/api/catkin/html/user_guide/setup_dot_py.html - catkin_python_setup() - -################################################ -## Declare ROS messages, services and actions ## -################################################ - -## To declare and build messages, services or actions from within this -## package, follow these steps: -## * Let MSG_DEP_SET be the set of packages whose message types you use in -## your messages/services/actions (e.g. std_msgs, actionlib_msgs, ...). -## * In the file package.xml: -## * add a build_depend tag for "message_generation" -## * add a build_depend and a exec_depend tag for each package in MSG_DEP_SET -## * If MSG_DEP_SET isn't empty the following dependency has been pulled in -## but can be declared for certainty nonetheless: -## * add a exec_depend tag for "message_runtime" -## * In this file (CMakeLists.txt): -## * add "message_generation" and every package in MSG_DEP_SET to -## find_package(catkin REQUIRED COMPONENTS ...) -## * add "message_runtime" and every package in MSG_DEP_SET to -## catkin_package(CATKIN_DEPENDS ...) -## * uncomment the add_*_files sections below as needed -## and list every .msg/.srv/.action file to be processed -## * uncomment the generate_messages entry below -## * add every package in MSG_DEP_SET to generate_messages(DEPENDENCIES ...) 
- -## Generate messages in the 'msg' folder -# add_message_files( -# FILES -# Message1.msg -# Message2.msg -# ) - -## Generate services in the 'srv' folder -# add_service_files( -# FILES -# Service1.srv -# Service2.srv -# ) - -## Generate actions in the 'action' folder -# add_action_files( -# FILES -# Action1.action -# Action2.action -# ) - -## Generate added messages and services with any dependencies listed here -# generate_messages( -# DEPENDENCIES -# std_msgs -# ) - -################################################ -## Declare ROS dynamic reconfigure parameters ## -################################################ - -## To declare and build dynamic reconfigure parameters within this -## package, follow these steps: -## * In the file package.xml: -## * add a build_depend and a exec_depend tag for "dynamic_reconfigure" -## * In this file (CMakeLists.txt): -## * add "dynamic_reconfigure" to -## find_package(catkin REQUIRED COMPONENTS ...) -## * uncomment the "generate_dynamic_reconfigure_options" section below -## and list every .cfg file to be processed - -## Generate dynamic reconfigure parameters in the 'cfg' folder -# generate_dynamic_reconfigure_options( -# cfg/DynReconf1.cfg -# cfg/DynReconf2.cfg -# ) - -################################### -## catkin specific configuration ## -################################### -## The catkin_package macro generates cmake config files for your package -## Declare things to be passed to dependent projects -## INCLUDE_DIRS: uncomment this if your package contains header files -## LIBRARIES: libraries you create in this project that dependent projects also need -## CATKIN_DEPENDS: catkin_packages dependent projects also need -## DEPENDS: system dependencies of this project that dependent projects also need -catkin_package( -# INCLUDE_DIRS include -# LIBRARIES train_dataset_model -# CATKIN_DEPENDS rospy std_msgs -# DEPENDS system_lib -) - -########### -## Build ## -########### - -## Specify additional locations of header files -## Your package locations should be listed before other locations -include_directories( -# include - ${catkin_INCLUDE_DIRS} -) - -## Declare a C++ library -# add_library(${PROJECT_NAME} -# src/${PROJECT_NAME}/train_dataset_model.cpp -# ) - -## Add cmake target dependencies of the library -## as an example, code may need to be generated before libraries -## either from message generation or dynamic reconfigure -# add_dependencies(${PROJECT_NAME} ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS}) - -## Declare a C++ executable -## With catkin_make all packages are built within a single CMake context -## The recommended prefix ensures that target names across packages don't collide -# add_executable(${PROJECT_NAME}_node src/train_dataset_model_node.cpp) - -## Rename C++ executable without prefix -## The above recommended prefix causes long target names, the following renames the -## target back to the shorter version for ease of user use -## e.g. 
"rosrun someones_pkg node" instead of "rosrun someones_pkg someones_pkg_node" -# set_target_properties(${PROJECT_NAME}_node PROPERTIES OUTPUT_NAME node PREFIX "") - -## Add cmake target dependencies of the executable -## same as for the library above -# add_dependencies(${PROJECT_NAME}_node ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS}) - -## Specify libraries to link a library or executable target against -# target_link_libraries(${PROJECT_NAME}_node -# ${catkin_LIBRARIES} -# ) - -############# -## Install ## -############# - -# all install targets should use catkin DESTINATION variables -# See http://ros.org/doc/api/catkin/html/adv_user_guide/variables.html - -## Mark executable scripts (Python etc.) for installation -## in contrast to setup.py, you can choose the destination -# catkin_install_python(PROGRAMS -# scripts/my_python_script -# DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} -# ) - -## Mark executables for installation -## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_executables.html -# install(TARGETS ${PROJECT_NAME}_node -# RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} -# ) - -## Mark libraries for installation -## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_libraries.html -# install(TARGETS ${PROJECT_NAME} -# ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} -# LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} -# RUNTIME DESTINATION ${CATKIN_GLOBAL_BIN_DESTINATION} -# ) - -## Mark cpp header files for installation -# install(DIRECTORY include/${PROJECT_NAME}/ -# DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION} -# FILES_MATCHING PATTERN "*.h" -# PATTERN ".svn" EXCLUDE -# ) - -## Mark other files for installation (e.g. launch and bag files, etc.) -# install(FILES -# # myfile1 -# # myfile2 -# DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION} -# ) - -############# -## Testing ## -############# - -## Add gtest based cpp test target and link libraries -# catkin_add_gtest(${PROJECT_NAME}-test test/test_train_dataset_model.cpp) -# if(TARGET ${PROJECT_NAME}-test) -# target_link_libraries(${PROJECT_NAME}-test ${PROJECT_NAME}) -# endif() - -## Add folders to be run by python nosetests -# catkin_add_nosetests(test) diff --git a/legacy/people_detection/train_dataset_model/README.md b/legacy/people_detection/train_dataset_model/README.md deleted file mode 100644 index c7217078b..000000000 --- a/legacy/people_detection/train_dataset_model/README.md +++ /dev/null @@ -1,71 +0,0 @@ -# train_dataset_model - -The train_dataset_model package - -This package is maintained by: -- [nicole](mailto:nicole@todo.todo) - -## Prerequisites - -This package depends on the following ROS packages: -- catkin (buildtool) -- rospy (build) -- std_msgs (build) -- rospy (exec) -- std_msgs (exec) - -Ask the package maintainer to write or create a blank `doc/PREREQUISITES.md` for their package! - -## Usage - -The train model state has the following file : - -1. train model => simply call a method to train -2. extract embeddings -3. train model state => i currently use this state to train - -on line 19 and line 20 you have to set some variables -> i will work to make it easier in the future - -when you have run the create_dataset: - -```python -rosrun create_dataset create_dataset_state.py -``` - -and you have a folder with a random name for instance 'wfeigq' -then you set that folder name in _line 20_ in train_model_state.py -on _line 19_ you set the name that you want to recognise the person with. 
- -I currently take 10 photos and recognise 5 people: - -1. flatmate -2. american -3. elisabeth -4. gerard -5. matteo - -## Example - -Ask the package maintainer to write a `doc/EXAMPLE.md` for their package! - -## Technical Overview - -Ask the package maintainer to write a `doc/TECHNICAL.md` for their package! - -## ROS Definitions - -### Launch Files - -This package has no launch files. - -### Messages - -This package has no messages. - -### Services - -This package has no services. - -### Actions - -This package has no actions. diff --git a/legacy/people_detection/train_dataset_model/__init__.py b/legacy/people_detection/train_dataset_model/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/legacy/people_detection/train_dataset_model/doc/USAGE.md b/legacy/people_detection/train_dataset_model/doc/USAGE.md deleted file mode 100644 index 412c3cee1..000000000 --- a/legacy/people_detection/train_dataset_model/doc/USAGE.md +++ /dev/null @@ -1,25 +0,0 @@ -The train model state has the following file : - -1. train model => simply call a method to train -2. extract embeddings -3. train model state => i currently use this state to train - -on line 19 and line 20 you have to set some variables -> i will work to make it easier in the future - -when you have run the create_dataset: - -```python -rosrun create_dataset create_dataset_state.py -``` - -and you have a folder with a random name for instance 'wfeigq' -then you set that folder name in _line 20_ in train_model_state.py -on _line 19_ you set the name that you want to recognise the person with. - -I currently take 10 photos and recognise 5 people: - -1. flatmate -2. american -3. elisabeth -4. gerard -5. matteo diff --git a/legacy/people_detection/train_dataset_model/package.xml b/legacy/people_detection/train_dataset_model/package.xml deleted file mode 100644 index 5e35508b3..000000000 --- a/legacy/people_detection/train_dataset_model/package.xml +++ /dev/null @@ -1,65 +0,0 @@ - - - train_dataset_model - 0.0.0 - The train_dataset_model package - - - - - nicole - - - - - - TODO - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - catkin - rospy - std_msgs - rospy - std_msgs - rospy - std_msgs - - - - - - - - diff --git a/legacy/people_detection/train_dataset_model/src/__init__.py b/legacy/people_detection/train_dataset_model/src/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/legacy/people_detection/train_dataset_model/src/train_dataset_model/__init__.py b/legacy/people_detection/train_dataset_model/src/train_dataset_model/__init__.py deleted file mode 100644 index 1bdbcae27..000000000 --- a/legacy/people_detection/train_dataset_model/src/train_dataset_model/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .train_model_state import TrainModelState \ No newline at end of file diff --git a/legacy/people_detection/train_dataset_model/src/train_dataset_model/extract_embeddings.py b/legacy/people_detection/train_dataset_model/src/train_dataset_model/extract_embeddings.py deleted file mode 100644 index 52d6b0494..000000000 --- a/legacy/people_detection/train_dataset_model/src/train_dataset_model/extract_embeddings.py +++ /dev/null @@ -1,105 +0,0 @@ -#!/usr/bin/env python3 -# import the necessary packages -import numpy as np -import argparse -import imutils -from imutils import paths -import pickle -import cv2 -import os -import rospkg - - -# Responsible for using a deep learning feature extractor to generate a 128-D vector describing a face. 
All faces in -# our dataset will be passed through the neural network to generate embeddings. - -def extract(): - # load our serialized face detector from disk - path_face_detection_model = os.path.join(rospkg.RosPack().get_path('face_detection'), 'caffe_model') - path_nn = os.path.join(rospkg.RosPack().get_path('face_detection'), 'nn4', "nn4.small2.v1.t7") - path_dataset = os.path.join(rospkg.RosPack().get_path('create_dataset'), "dataset") - path_output = os.path.join(rospkg.RosPack().get_path('face_detection'), "output") - - print("[INFO] loading face detector...") - proto_path = os.path.join(path_face_detection_model, "deploy.prototxt") - model_path = os.path.join(path_face_detection_model, - "res10_300x300_ssd_iter_140000.caffemodel") - detector = cv2.dnn.readNetFromCaffe(proto_path, model_path) - # load our serialized face embedding model from disk - print("[INFO] loading face recognizer...") - embedder = cv2.dnn.readNetFromTorch(path_nn) - - # grab the paths to the input images in our dataset - print("[INFO] quantifying faces...") - image_paths = list(paths.list_images(path_dataset)) - print(image_paths) - # initialize our lists of extracted facial embeddings and - # corresponding people names - known_embeddings = [] - known_names = [] - # initialize the total number of faces processed - total = 0 - - # loop over the image paths - for (i, imagePath) in enumerate(image_paths): - # extract the person name from the image path - print("[INFO] processing image {}/{}".format(i + 1, - len(image_paths))) - name = imagePath.split(os.path.sep)[-2] - # load the image, resize it to have a width of 600 pixels (while - # maintaining the aspect ratio), and then grab the image - # dimensions - image = cv2.imread(imagePath) - print(imagePath) - image = imutils.resize(image, width=600) - (h, w) = image.shape[:2] - - # construct a blob from the image - image_blob = cv2.dnn.blobFromImage( - cv2.resize(image, (300, 300)), 1.0, (300, 300), - (104.0, 177.0, 123.0), swapRB=False, crop=False) - # apply OpenCV's deep learning-based face detector to localize - # faces in the input image - detector.setInput(image_blob) - detections = detector.forward() - # ensure at least one face was found - if len(detections) > 0: - # we're making the assumption that each image has only ONE - # face, so find the bounding box with the largest probability - i = np.argmax(detections[0, 0, :, 2]) - confidence = detections[0, 0, i, 2] - # ensure that the detection with the largest probability also - # means our minimum probability test (thus helping filter out - # weak detections) - if confidence > 0.5: - # compute the (x, y)-coordinates of the bounding box for - # the face - box = detections[0, 0, i, 3:7] * np.array([w, h, w, h]) - (startX, startY, endX, endY) = box.astype("int") - - # extract the face ROI and grab the ROI dimensions - face = image[startY:endY, startX:endX] - (fH, fW) = face.shape[:2] - # ensure the face width and height are sufficiently large - if fW < 20 or fH < 20: - continue - # construct a blob for the face ROI, then pass the blob - # through our face embedding model to obtain the 128-d - # quantification of the face - faceBlob = cv2.dnn.blobFromImage(face, 1.0 / 255, - (96, 96), (0, 0, 0), swapRB=True, crop=False) - embedder.setInput(faceBlob) - vec = embedder.forward() - # add the name of the person + corresponding face - # embedding to their respective lists - known_names.append(name) - - known_embeddings.append(vec.flatten()) - total += 1 - # dump the facial embeddings + names to disk - print("[INFO] 
serializing {} encodings...".format(total)) - print('YO I KNOW THESE PEEPS: ', known_names) - data = {"embeddings": known_embeddings, "names": known_names} - f = open(path_output + "/embeddings.pickle", "wb") - f.write(pickle.dumps(data)) - f.close() \ No newline at end of file diff --git a/legacy/people_detection/train_dataset_model/src/train_dataset_model/train_model.py b/legacy/people_detection/train_dataset_model/src/train_dataset_model/train_model.py deleted file mode 100644 index 2096b13b0..000000000 --- a/legacy/people_detection/train_dataset_model/src/train_dataset_model/train_model.py +++ /dev/null @@ -1,39 +0,0 @@ -from sklearn.preprocessing import LabelEncoder -from sklearn.svm import SVC -import pickle -import os -import rospkg - -# Detect faces, extract embeddings, and fit our SVM model to the embeddings data. - -# construct the argument parser and parse the arguments -# load the face embeddings -def train_model(): - # self.path = os.path.join(rospkg.RosPack().get_path('people_detection'), 'dataset', - # self.name) - path = os.path.join(rospkg.RosPack().get_path('face_detection'), 'output', 'embeddings.pickle') - path_output = os.path.join(rospkg.RosPack().get_path('face_detection'), 'output') - - print("[INFO] loading face embeddings...") - print(open(path, "rb").read()) - data = pickle.loads(open(path, "rb").read()) - # encode the labels - print("[INFO] encoding labels...") - le = LabelEncoder() - labels = le.fit_transform(data["names"]) - - # train the model used to accept the 128-d embeddings of the face and - # then produce the actual face recognition - print("[INFO] training model...") - recognizer = SVC(C=1.0, kernel="linear", probability=True) - recognizer.fit(data["embeddings"], labels) - - # write the actual face recognition model to disk - f = open(os.path.join(path_output, "recognizer.pickle"), "wb") - f.write(pickle.dumps(recognizer)) - f.close() - # write the label encoder to disk - f = open(os.path.join(path_output, "le.pickle"), "wb") - f.write(pickle.dumps(le)) - f.close() - diff --git a/legacy/people_detection/train_dataset_model/src/train_dataset_model/train_model_state.py b/legacy/people_detection/train_dataset_model/src/train_dataset_model/train_model_state.py deleted file mode 100755 index 74adb1c11..000000000 --- a/legacy/people_detection/train_dataset_model/src/train_dataset_model/train_model_state.py +++ /dev/null @@ -1,48 +0,0 @@ -#!/usr/bin/env python3 -import rospy -import os, rospkg -import shutil -import smach -from extract_embeddings import extract -from train_model import train_model as train - - -class TrainModelState(smach.State): - def __init__(self): - smach.State.__init__(self, - outcomes=['finished_training', 'training_failed'], - input_keys=['current_person', 'dataset_path'], # ! comment for tesing purposes - output_keys=['current_person']) # ! 
comment for tesing purposes - - - def execute(self, userdata): - name = 'matteo' - dataset_path = os.path.join(rospkg.RosPack().get_path('create_dataset'), 'dataset', 'mateo') - new = dataset_path.replace(dataset_path.split(os.path.sep)[-1], name) - if os.path.exists(new): - shutil.rmtree(new) - - os.renames(dataset_path, - dataset_path.replace(dataset_path.split(os.path.sep)[-1], name)) - - if len(next(os.walk(os.path.dirname(dataset_path)))[1]) > 1: - extract() - train() - return 'finished_training' - else: - rospy.logwarn("At least 2 datasets are required for training.") - return 'training_failed' - -if __name__ == '__main__': - rospy.init_node('smach_example_state_machine') - name = 'alice' - dataset_path = os.path.join(rospkg.RosPack().get_path('create_dataset'), 'dataset', 'alice') - sm = smach.StateMachine(outcomes=['success', 'failed']) - with sm: - smach.StateMachine.add('TRAIN_MODEL', TrainModelState(), - transitions={'finished_training':'success', 'training_failed':'failed'}, - remapping={'dataset_path':'dataset_path', - 'current_person':'current_person'}) - outcome = sm.execute() - rospy.loginfo('I have completed execution with outcome: ') - rospy.loginfo(outcome) diff --git a/legacy/lasr_object_detection_yolo/__init__.py b/legacy/read_pcl_info/CATKIN_IGNORE similarity index 100% rename from legacy/lasr_object_detection_yolo/__init__.py rename to legacy/read_pcl_info/CATKIN_IGNORE diff --git a/skills/package.xml b/skills/package.xml index be2d32977..44a4f5827 100644 --- a/skills/package.xml +++ b/skills/package.xml @@ -7,7 +7,7 @@ - Jared Swift + Jared Swift diff --git a/skills/src/lasr_skills/__init__.py b/skills/src/lasr_skills/__init__.py index b278ee554..f1a0a65b6 100755 --- a/skills/src/lasr_skills/__init__.py +++ b/skills/src/lasr_skills/__init__.py @@ -1,13 +1,10 @@ -from .detect_objects import DetectObjects -from .detect_objects_3d import DetectObjects3D -from .detect_objects_in_area_3d import DetectObjectsInArea3D -from .detect_people import DetectPeople -from .detect_people_3d import DetectPeople3D -from .detect_people_in_area_3d import DetectPeopleInArea3D +from .detect import Detect +from .detect_3d import Detect3D +from .detect_3d_in_area import Detect3DInArea from .wait_for_person import WaitForPerson from .wait_for_person_in_area import WaitForPersonInArea from .describe_people import DescribePeople from .look_to_point import LookToPoint from .look_to_point import LookToPoint from .go_to_location import GoToLocation -from .go_to_semantic_location import GoToSemanticLocation \ No newline at end of file +from .go_to_semantic_location import GoToSemanticLocation diff --git a/skills/src/lasr_skills/detect.py b/skills/src/lasr_skills/detect.py new file mode 100644 index 000000000..b97987041 --- /dev/null +++ b/skills/src/lasr_skills/detect.py @@ -0,0 +1,46 @@ +#!/usr/bin/env python3 + +import rospy +import smach +from sensor_msgs.msg import Image + +from lasr_vision_msgs.srv import YoloDetection + +from typing import List + + +class Detect(smach.State): + + def __init__( + self, + image_topic: str = "/xtion/rgb/image_raw", + model: str = "yolov8n.pt", + filter: List[str] | None = None, + confidence: float = 0.5, + nms: float = 0.3, + ): + smach.State.__init__( + self, + outcomes=["succeeded", "failed"], + output_keys=["detections"], + ) + self.image_topic = image_topic + self.model = model + self.filter = filter if filter is not None else [] + self.confidence = confidence + self.nms = nms + self.yolo = rospy.ServiceProxy("/yolov8/detect", YoloDetection) + 
self.yolo.wait_for_service()
+
+    def execute(self, userdata):
+        img_msg = rospy.wait_for_message(self.image_topic, Image)
+        try:
+            result = self.yolo(img_msg, self.model, self.confidence, self.nms)
+            result.detected_objects = [
+                det for det in result.detected_objects if not self.filter or det.name in self.filter
+            ]
+            userdata.detections = result
+            return "succeeded"
+        except rospy.ServiceException as e:
+            rospy.logwarn(f"Unable to perform inference. ({str(e)})")
+            return "failed"
diff --git a/skills/src/lasr_skills/detect_3d.py b/skills/src/lasr_skills/detect_3d.py
new file mode 100644
index 000000000..2e0d7e016
--- /dev/null
+++ b/skills/src/lasr_skills/detect_3d.py
@@ -0,0 +1,45 @@
+#!/usr/bin/env python3
+import rospy
+import smach
+
+from sensor_msgs.msg import PointCloud2
+from lasr_vision_msgs.srv import YoloDetection3D
+
+from typing import List
+
+
+class Detect3D(smach.State):
+
+    def __init__(
+        self,
+        depth_topic: str = "/xtion/depth_registered/points",
+        model: str = "yolov8n-seg.pt",
+        filter: List[str] | None = None,
+        confidence: float = 0.5,
+        nms: float = 0.3,
+    ):
+        smach.State.__init__(
+            self,
+            outcomes=["succeeded", "failed"],
+            output_keys=["detections_3d"],
+        )
+        self.depth_topic = depth_topic
+        self.model = model
+        self.filter = filter if filter is not None else []
+        self.confidence = confidence
+        self.nms = nms
+        self.yolo = rospy.ServiceProxy("/yolov8/detect3d", YoloDetection3D)
+        self.yolo.wait_for_service()
+
+    def execute(self, userdata):
+        pcl_msg = rospy.wait_for_message(self.depth_topic, PointCloud2)
+        try:
+            result = self.yolo(pcl_msg, self.model, self.confidence, self.nms)
+            result.detected_objects = [
+                det for det in result.detected_objects if not self.filter or det.name in self.filter
+            ]
+            userdata.detections_3d = result
+            return "succeeded"
+        except rospy.ServiceException as e:
+            rospy.logwarn(f"Unable to perform inference. ({str(e)})")
+            return "failed"
diff --git a/skills/src/lasr_skills/detect_3d_in_area.py b/skills/src/lasr_skills/detect_3d_in_area.py
new file mode 100644
index 000000000..600613707
--- /dev/null
+++ b/skills/src/lasr_skills/detect_3d_in_area.py
@@ -0,0 +1,72 @@
+#!/usr/bin/env python3
+
+import smach
+
+from lasr_skills import Detect3D
+from lasr_shapely import LasrShapely
+
+
+from typing import List
+
+
+class Detect3DInArea(smach.StateMachine):
+
+    class FilterDetections(smach.State):
+
+        def __init__(self, area_polygon):
+            smach.State.__init__(
+                self,
+                outcomes=["succeeded", "failed"],
+                input_keys=["detections_3d"],
+                output_keys=["detections_3d"],
+            )
+            self.area_polygon = area_polygon
+            self.shapely = LasrShapely()
+
+        def execute(self, userdata):
+            satisfied_points = self.shapely.are_points_in_polygon_2d(
+                self.area_polygon,
+                [[pose[0], pose[1]] for (_, pose) in userdata.detections_3d],
+            ).inside
+            filtered_detections = [
+                userdata.detections_3d[i]
+                for i in range(0, len(userdata.detections_3d))
+                if satisfied_points[i]
+            ]
+            userdata.detections_3d = filtered_detections
+
+            return "succeeded"
+
+    def __init__(
+        self,
+        area_polygon: List[List[float]],
+        depth_topic: str = "/xtion/depth_registered/points",
+        model: str = "yolov8n-seg.pt",
+        filter: List[str] | None = None,
+        confidence: float = 0.5,
+        nms: float = 0.3,
+    ):
+        smach.StateMachine.__init__(
+            self,
+            outcomes=["succeeded", "failed"],
+            output_keys=["detections_3d"],
+        )
+
+        with self:
+
+            smach.StateMachine.add(
+                "DETECT_OBJECTS_3D",
+                Detect3D(
+                    depth_topic=depth_topic,
+                    model=model,
+                    filter=filter,
+                    confidence=confidence,
+                    nms=nms,
+                ),
+                transitions={"succeeded": "FILTER_DETECTIONS", "failed": "failed"},
+            )
+            smach.StateMachine.add(
+                "FILTER_DETECTIONS",
+                self.FilterDetections(area_polygon),
+                transitions={"succeeded": "succeeded", "failed": "failed"},
+            )
diff --git a/skills/src/lasr_skills/detect_objects.py b/skills/src/lasr_skills/detect_objects.py
deleted file mode 100644
index 195587f82..000000000
--- a/skills/src/lasr_skills/detect_objects.py
+++ /dev/null
@@ -1,24 +0,0 @@
-#!/usr/bin/env python3
-
-import rospy
-import smach
-from sensor_msgs.msg import Image
-
-from lasr_vision_msgs.srv import YoloDetection
-
-class DetectObjects(smach.State):
-
-    def __init__(self):
-        smach.State.__init__(self, outcomes=['succeeded', 'failed'], input_keys=['image_topic', 'img_msg', 'filter'], output_keys=['detections'])
-        self.yolo = rospy.ServiceProxy('/yolov8/detect', YoloDetection)
-
-    def execute(self, userdata):
-        img_msg = rospy.wait_for_message(userdata.image_topic, Image)
-        try:
-            result = self.yolo(img_msg, "yolov8n.pt", 0.5, 0.3)
-            result.detected_objects = [det for det in result.detected_objects if det.name in userdata.filter]
-            userdata.detections = result
-            return 'succeeded'
-        except rospy.ServiceException as e:
-            rospy.logwarn(f"Unable to perform inference.
({str(e)})") - return 'failed' \ No newline at end of file diff --git a/skills/src/lasr_skills/detect_objects_3d.py b/skills/src/lasr_skills/detect_objects_3d.py deleted file mode 100644 index 239098bf8..000000000 --- a/skills/src/lasr_skills/detect_objects_3d.py +++ /dev/null @@ -1,94 +0,0 @@ -#!/usr/bin/env python3 -import rospy -import smach - -from common_math import pcl_msg_to_cv2, seg_to_centroid -from cv_bridge3 import CvBridge -from tf_module.srv import TfTransform, TfTransformRequest -from std_msgs.msg import String -from geometry_msgs.msg import PointStamped, Point -from sensor_msgs.msg import PointCloud2 -from geometry_msgs.msg import PointStamped -from visualization_msgs.msg import Marker -import numpy as np - -from lasr_vision_msgs.srv import YoloDetection - - - -class DetectObjects3D(smach.State): - - def __init__(self): - smach.State.__init__(self, outcomes=['succeeded', 'failed'], input_keys=['depth_topic', 'filter'], output_keys=['detections_3d']) - self.yolo = rospy.ServiceProxy('/yolov8/detect', YoloDetection) - self.tf = rospy.ServiceProxy("/tf_transform", TfTransform) - self.bridge = CvBridge() - self.n_published_markers = 0 - self.marker_point_pub = rospy.Publisher("/detect_objects_3d/points", Marker) - self.marker_text_pub = rospy.Publisher("/detect_objects_3d/labels", Marker) - - def estimate_pose(self, pcl_msg, detection): - centroid_xyz = seg_to_centroid(pcl_msg, np.array(detection.xyseg)) - centroid = PointStamped() - centroid.point = Point(*centroid_xyz) - centroid.header = pcl_msg.header - tf_req = TfTransformRequest() - tf_req.target_frame = String("map") - tf_req.point = centroid - response = self.tf(tf_req) - return np.array([response.target_point.point.x, response.target_point.point.y, response.target_point.point.z]) - - def execute(self, userdata): - pcl_msg = rospy.wait_for_message("/xtion/depth_registered/points", PointCloud2) - try: - cv_im = pcl_msg_to_cv2(pcl_msg) - img_msg = self.bridge.cv2_to_imgmsg(cv_im) - result = self.yolo(img_msg, "yolov8n-seg.pt", 0.5, 0.3) - result.detected_objects = [det for det in result.detected_objects if det.name in userdata.filter] - result = [(detection, self.estimate_pose(pcl_msg, detection)) for detection in result.detected_objects] - for detection, point in result: - marker_point_msg = Marker() - marker_point_msg.header.frame_id = "map" - marker_point_msg.header.stamp = rospy.Time.now() - marker_point_msg.id = self.n_published_markers - marker_point_msg.type = Marker.SPHERE - marker_point_msg.action = Marker.ADD - marker_point_msg.pose.position.x = point[0] - marker_point_msg.pose.position.y = point[1] - marker_point_msg.pose.position.z = point[2] - marker_point_msg.color.r = 1.0 - marker_point_msg.color.g = 0.0 - marker_point_msg.color.b = 0.0 - marker_point_msg.scale.x = 0.1 - marker_point_msg.scale.y = 0.1 - marker_point_msg.scale.z = 0.1 - marker_point_msg.color.a = 1.0 - self.marker_point_pub.publish(marker_point_msg) - - marker_text_msg = Marker() - marker_text_msg.header.frame_id = "map" - marker_text_msg.header.stamp = rospy.Time.now() - marker_text_msg.id = self.n_published_markers - marker_text_msg.type = Marker.TEXT_VIEW_FACING - marker_text_msg.action = Marker.ADD - marker_text_msg.pose.position.x = point[0] - marker_text_msg.pose.position.y = point[1] - marker_text_msg.pose.position.z = point[2] + 0.15 - marker_text_msg.text = detection.name - marker_text_msg.color.r = 1.0 - marker_text_msg.color.g = 0.0 - marker_text_msg.color.b = 0.0 - marker_text_msg.scale.x = 0.1 - marker_text_msg.scale.y = 0.1 - 
marker_text_msg.scale.z = 0.1 - marker_text_msg.color.a = 1.0 - self.marker_text_pub.publish(marker_text_msg) - - - self.n_published_markers += 1 - - userdata.detections_3d = result - return 'succeeded' - except rospy.ServiceException as e: - rospy.logwarn(f"Unable to perform inference. ({str(e)})") - return 'failed' \ No newline at end of file diff --git a/skills/src/lasr_skills/detect_objects_in_area_3d.py b/skills/src/lasr_skills/detect_objects_in_area_3d.py deleted file mode 100644 index 009496741..000000000 --- a/skills/src/lasr_skills/detect_objects_in_area_3d.py +++ /dev/null @@ -1,30 +0,0 @@ -#!/usr/bin/env python3 - -import smach - -from lasr_skills import DetectObjects3D -from lasr_shapely import LasrShapely - - -class DetectObjectsInArea3D(smach.StateMachine): - - class FilterDetections(smach.State): - - def __init__(self): - smach.State.__init__(self, outcomes=['succeeded', 'failed'], input_keys=['area_polygon','detections_3d'], output_keys=['detections_3d']) - self.shapely = LasrShapely() - - def execute(self, userdata): - satisfied_points = self.shapely.are_points_in_polygon_2d(userdata.area_polygon, [[pose[0], pose[1]] for (_, pose) in userdata.detections_3d]).inside - filtered_detections = [userdata.detections_3d[i] for i in range(0, len(userdata.detections_3d)) if satisfied_points[i]] - userdata.detections_3d = filtered_detections - - return 'succeeded' - - def __init__(self): - smach.StateMachine.__init__(self, outcomes=['succeeded', 'failed'], input_keys=['pcl_msg', 'area_polygon', 'filter'], output_keys=['detections_3d']) - - with self: - - smach.StateMachine.add('DETECT_OBJECTS_3D', DetectObjects3D(), transitions={'succeeded': 'FILTER_DETECTIONS', 'failed' : 'failed'}) - smach.StateMachine.add('FILTER_DETECTIONS', self.FilterDetections(), transitions={'succeeded' : 'succeeded', 'failed' : 'failed'}) \ No newline at end of file diff --git a/skills/src/lasr_skills/detect_people.py b/skills/src/lasr_skills/detect_people.py deleted file mode 100755 index 7c46dfc7b..000000000 --- a/skills/src/lasr_skills/detect_people.py +++ /dev/null @@ -1,9 +0,0 @@ -#!/usr/bin/env python3 - -from lasr_skills import DetectObjects - -class DetectPeople(DetectObjects): - - def __init__(self): - super().__init__() - self.userdata.filter = ["person"] \ No newline at end of file diff --git a/skills/src/lasr_skills/detect_people_3d.py b/skills/src/lasr_skills/detect_people_3d.py deleted file mode 100755 index 2fe1d383d..000000000 --- a/skills/src/lasr_skills/detect_people_3d.py +++ /dev/null @@ -1,9 +0,0 @@ -#!/usr/bin/env python3 - -from lasr_skills import DetectObjects3D - -class DetectPeople3D(DetectObjects3D): - - def __init__(self): - super().__init__() - self.userdata.filter = ["person"] \ No newline at end of file diff --git a/skills/src/lasr_skills/detect_people_in_area_3d.py b/skills/src/lasr_skills/detect_people_in_area_3d.py deleted file mode 100755 index 05c3089c1..000000000 --- a/skills/src/lasr_skills/detect_people_in_area_3d.py +++ /dev/null @@ -1,9 +0,0 @@ -#!/usr/bin/env python3 - -from lasr_skills import DetectObjectsInArea3D - -class DetectPeopleInArea3D(DetectObjectsInArea3D): - - def __init__(self): - super().__init__() - self.userdata.filter = ["person"] \ No newline at end of file diff --git a/skills/src/lasr_skills/wait_for_person.py b/skills/src/lasr_skills/wait_for_person.py index 68aacc469..85b31d033 100755 --- a/skills/src/lasr_skills/wait_for_person.py +++ b/skills/src/lasr_skills/wait_for_person.py @@ -2,25 +2,41 @@ import smach -from lasr_skills import DetectPeople 
+from lasr_skills import Detect + class WaitForPerson(smach.StateMachine): class CheckForPerson(smach.State): def __init__(self): - smach.State.__init__(self, outcomes=['done', 'not_done'], input_keys=['detections']) + smach.State.__init__( + self, outcomes=["done", "not_done"], input_keys=["detections"] + ) def execute(self, userdata): if len(userdata.detections.detected_objects): - return 'done' + return "done" else: - return 'not_done' - + return "not_done" + def __init__(self): - smach.StateMachine.__init__(self, outcomes=['succeeded', 'failed'], input_keys=['depth_topic'], output_keys=['detections']) + smach.StateMachine.__init__( + self, + outcomes=["succeeded", "failed"], + input_keys=["depth_topic"], + output_keys=["detections"], + ) with self: - smach.StateMachine.add('DETECT_PEOPLE', DetectPeople(), transitions={'succeeded' : 'CHECK_FOR_PERSON', 'failed' : 'failed'}) - smach.StateMachine.add('CHECK_FOR_PERSON', self.CheckForPerson(), transitions={'done' : 'succeeded', 'not_done' : 'DETECT_PEOPLE'}) \ No newline at end of file + smach.StateMachine.add( + "DETECT_PEOPLE", + Detect(filter=["person"]), + transitions={"succeeded": "CHECK_FOR_PERSON", "failed": "failed"}, + ) + smach.StateMachine.add( + "CHECK_FOR_PERSON", + self.CheckForPerson(), + transitions={"done": "succeeded", "not_done": "DETECT_PEOPLE"}, + ) diff --git a/skills/src/lasr_skills/wait_for_person_in_area.py b/skills/src/lasr_skills/wait_for_person_in_area.py index 1ce3bcd98..604812b3b 100755 --- a/skills/src/lasr_skills/wait_for_person_in_area.py +++ b/skills/src/lasr_skills/wait_for_person_in_area.py @@ -2,24 +2,40 @@ import smach -from lasr_skills import DetectPeopleInArea3D +from lasr_skills import Detect3DInArea + class WaitForPersonInArea(smach.StateMachine): class CheckForPerson(smach.State): def __init__(self): - smach.State.__init__(self, outcomes=['done', 'not_done'], input_keys=['detections_3d']) + smach.State.__init__( + self, outcomes=["done", "not_done"], input_keys=["detections_3d"] + ) def execute(self, userdata): if len(userdata.detections_3d): - return 'done' + return "done" else: - return 'not_done' - + return "not_done" + def __init__(self): - smach.StateMachine.__init__(self, outcomes=['succeeded', 'failed'], input_keys=['depth_topic', 'area_polygon'], output_keys=['detections_3d']) + smach.StateMachine.__init__( + self, + outcomes=["succeeded", "failed"], + input_keys=["depth_topic", "area_polygon"], + output_keys=["detections_3d"], + ) with self: - smach.StateMachine.add('DETECT_PEOPLE_3D', DetectPeopleInArea3D(), transitions={'succeeded' : 'CHECK_FOR_PERSON', 'failed' : 'failed'}) - smach.StateMachine.add('CHECK_FOR_PERSON', self.CheckForPerson(), transitions={'done' : 'succeeded', 'not_done' : 'DETECT_PEOPLE_3D'}) \ No newline at end of file + smach.StateMachine.add( + "DETECT_PEOPLE_3D", + Detect3DInArea(), + transitions={"succeeded": "CHECK_FOR_PERSON", "failed": "failed"}, + ) + smach.StateMachine.add( + "CHECK_FOR_PERSON", + self.CheckForPerson(), + transitions={"done": "succeeded", "not_done": "DETECT_PEOPLE_3D"}, + ) diff --git a/legacy/lasr_object_detection_yolo/src/__init__.py b/tasks/coffee_shop/CATKIN_IGNORE similarity index 100% rename from legacy/lasr_object_detection_yolo/src/__init__.py rename to tasks/coffee_shop/CATKIN_IGNORE diff --git a/tasks/coffee_shop/package.xml b/tasks/coffee_shop/package.xml index 6b054e0d9..28038923d 100644 --- a/tasks/coffee_shop/package.xml +++ b/tasks/coffee_shop/package.xml @@ -7,7 +7,7 @@ - Jared Swift + Jared Swift Peter Tisnikar Paul Makles 
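The refactor above replaces the userdata-driven `DetectPeople*` states with constructor-parameterised skills. A minimal usage sketch, assuming a YOLOv8 detection service is running (the node name is illustrative):

```python
#!/usr/bin/env python3
import rospy
import smach

from lasr_skills import Detect

rospy.init_node("detect_example")

sm = smach.StateMachine(outcomes=["succeeded", "failed"])
with sm:
    # Parameters that used to travel through userdata (topic, model, filter)
    # are now plain constructor arguments.
    smach.StateMachine.add(
        "DETECT_PEOPLE",
        Detect(image_topic="/xtion/rgb/image_raw", filter=["person"]),
        transitions={"succeeded": "succeeded", "failed": "failed"},
    )

if sm.execute() == "succeeded":
    rospy.loginfo(sm.userdata.detections.detected_objects)
```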
diff --git a/legacy/lasr_object_detection_yolo/src/lasr_object_detection_yolo/__init__.py b/tasks/lift/CATKIN_IGNORE similarity index 100% rename from legacy/lasr_object_detection_yolo/src/lasr_object_detection_yolo/__init__.py rename to tasks/lift/CATKIN_IGNORE diff --git a/worksheets/package.xml b/worksheets/package.xml index fdd316601..b01d124f6 100644 --- a/worksheets/package.xml +++ b/worksheets/package.xml @@ -7,7 +7,7 @@ - jared + Jared Swift
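For the 3D variant, the detection area is now a required constructor argument of `Detect3DInArea` rather than a userdata key; `lasr_shapely` takes the polygon as `[[x, y], ...]` pairs. A usage sketch with a hypothetical map-frame square, again assuming the YOLOv8 3D service is available:

```python
#!/usr/bin/env python3
import rospy
import smach

from lasr_skills import Detect3DInArea

rospy.init_node("detect_3d_in_area_example")

# Hypothetical polygon: a 2 m x 2 m square in the map frame.
area = [[0.0, 0.0], [2.0, 0.0], [2.0, 2.0], [0.0, 2.0]]

sm = smach.StateMachine(outcomes=["succeeded", "failed"])
with sm:
    smach.StateMachine.add(
        "DETECT_IN_AREA",
        Detect3DInArea(area, filter=["person"]),
        transitions={"succeeded": "succeeded", "failed": "failed"},
    )

outcome = sm.execute()
rospy.loginfo(f"Detection outcome: {outcome}")
```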