diff --git a/.gitmodules b/.gitmodules index 2f845f4b6..e69de29bb 100644 --- a/.gitmodules +++ b/.gitmodules @@ -1,3 +0,0 @@ -[submodule "common/third_party/aruco_ros"] - path = common/third_party/aruco_ros - url = git@github.com:pal-robotics/aruco_ros.git diff --git a/legacy/lasr_web_server/CMakeLists.txt b/common/helpers/cv2_pcl/CMakeLists.txt similarity index 96% rename from legacy/lasr_web_server/CMakeLists.txt rename to common/helpers/cv2_pcl/CMakeLists.txt index 85fdea9c8..83e2d9e52 100644 --- a/legacy/lasr_web_server/CMakeLists.txt +++ b/common/helpers/cv2_pcl/CMakeLists.txt @@ -1,5 +1,5 @@ cmake_minimum_required(VERSION 3.0.2) -project(lasr_web_server) +project(cv2_pcl) ## Compile as C++11, supported in ROS Kinetic and newer # add_compile_options(-std=c++11) @@ -100,7 +100,7 @@ catkin_python_setup() ## DEPENDS: system dependencies of this project that dependent projects also need catkin_package( # INCLUDE_DIRS include -# LIBRARIES lasr_web_server +# LIBRARIES cv2_pcl # CATKIN_DEPENDS other_catkin_pkg # DEPENDS system_lib ) @@ -118,7 +118,7 @@ include_directories( ## Declare a C++ library # add_library(${PROJECT_NAME} -# src/${PROJECT_NAME}/lasr_web_server.cpp +# src/${PROJECT_NAME}/cv2_pcl.cpp # ) ## Add cmake target dependencies of the library @@ -129,7 +129,7 @@ include_directories( ## Declare a C++ executable ## With catkin_make all packages are built within a single CMake context ## The recommended prefix ensures that target names across packages don't collide -# add_executable(${PROJECT_NAME}_node src/lasr_web_server_node.cpp) +# add_executable(${PROJECT_NAME}_node src/cv2_pcl_node.cpp) ## Rename C++ executable without prefix ## The above recommended prefix causes long target names, the following renames the @@ -193,7 +193,7 @@ include_directories( ############# ## Add gtest based cpp test target and link libraries -# catkin_add_gtest(${PROJECT_NAME}-test test/test_lasr_web_server.cpp) +# catkin_add_gtest(${PROJECT_NAME}-test test/test_cv2_pcl.cpp) # if(TARGET ${PROJECT_NAME}-test) # target_link_libraries(${PROJECT_NAME}-test ${PROJECT_NAME}) # endif() diff --git a/legacy/lasr_dialogflow/package.xml b/common/helpers/cv2_pcl/package.xml similarity index 89% rename from legacy/lasr_dialogflow/package.xml rename to common/helpers/cv2_pcl/package.xml index 05993da96..d8335720b 100644 --- a/legacy/lasr_dialogflow/package.xml +++ b/common/helpers/cv2_pcl/package.xml @@ -1,25 +1,25 @@ - lasr_dialogflow + cv2_pcl 0.0.0 - The lasr_dialogflow package + The cv2_pcl package - jared - + Jared Swift +a - TODO + MIT - + @@ -49,6 +49,7 @@ catkin + sensor_msgs diff --git a/legacy/common_math/setup.py b/common/helpers/cv2_pcl/setup.py similarity index 51% rename from legacy/common_math/setup.py rename to common/helpers/cv2_pcl/setup.py index ab3e09d99..76e948739 100644 --- a/legacy/common_math/setup.py +++ b/common/helpers/cv2_pcl/setup.py @@ -1,12 +1,8 @@ - #!/usr/bin/env python3 from distutils.core import setup from catkin_pkg.python_setup import generate_distutils_setup -setup_args = generate_distutils_setup( - packages=['common_math'], - package_dir={'': 'src'} -) +setup_args = generate_distutils_setup(packages=["cv2_pcl"], package_dir={"": "src"}) -setup(**setup_args) \ No newline at end of file +setup(**setup_args) diff --git a/legacy/common_math/src/common_math/math_.py b/common/helpers/cv2_pcl/src/cv2_pcl/__init__.py old mode 100755 new mode 100644 similarity index 51% rename from legacy/common_math/src/common_math/math_.py rename to common/helpers/cv2_pcl/src/cv2_pcl/__init__.py index 
cff4af892..0c299f09a --- a/legacy/common_math/src/common_math/math_.py +++ b/common/helpers/cv2_pcl/src/cv2_pcl/__init__.py @@ -1,34 +1,19 @@ -#!/usr/bin/python3 -import numpy as np -import rospy import numpy as np +from sensor_msgs.msg import PointCloud2 import ros_numpy as rnp -# import cv2 -# from geometry_msgs.msg import PointStamped, Point -# from std_msgs.msg import Header -# from robocup_receptionist.srv import TfTransform, TfTransformRequest -import math import cv2 +Mat = np.ndarray -def pcl_msg_to_cv2(pcl_msg): +def pcl_to_cv2(pcl: PointCloud2) -> Mat: """ - Constructs a cv2 image from a PointCloud2 message. - - Parameters - ---------- - pcl_msg : sensor_msgs/PointCloud2 - Input pointcloud (organised) - - Returns - ------- - np.array : cv2 image + Convert a given PointCloud2 message to a cv2 image """ # Extract rgb image from pointcloud - frame = np.fromstring(pcl_msg.data, dtype=np.uint8) - frame = frame.reshape(pcl_msg.height, pcl_msg.width, 32) + frame = np.fromstring(pcl.data, dtype=np.uint8) + frame = frame.reshape(pcl.height, pcl.width, 32) frame = frame[:, :, 16:19] # Ensure array is contiguous @@ -37,12 +22,16 @@ def pcl_msg_to_cv2(pcl_msg): return frame -def seg_to_centroid(pcl_msg, xyseg): +def seg_to_centroid(pcl: PointCloud2, xyseg: np.ndarray) -> np.ndarray: + """ + Computes the centroid of a given segment in a pointcloud + """ + # Convert xyseg to contours contours = xyseg.reshape(-1, 2) # cv2 image - cv_im = pcl_msg_to_cv2(pcl_msg) + cv_im = pcl_to_cv2(pcl) # Compute mask from contours mask = np.zeros(shape=cv_im.shape[:2]) cv2.fillPoly(mask, pts=[contours], color=(255, 255, 255)) @@ -54,7 +43,7 @@ def seg_to_centroid(pcl_msg, xyseg): return np.full(3, np.nan) # Unpack pointcloud into xyz array - pcl_xyz = rnp.point_cloud2.pointcloud2_to_xyz_array(pcl_msg, remove_nans=False) + pcl_xyz = rnp.point_cloud2.pointcloud2_to_xyz_array(pcl, remove_nans=False) # Extract points of interest xyz_points = [pcl_xyz[x][y] for x, y in indices] @@ -62,12 +51,16 @@ def seg_to_centroid(pcl_msg, xyseg): return np.nanmean(xyz_points, axis=0) -def bb_to_centroid(pcl_msg, x, y, w, h): +def bb_to_centroid(pcl: PointCloud2, x: int, y: int, w: int, h: int) -> np.ndarray: + """ + Computes the centroid of a given bounding box in a pointcloud + """ + # Convert xywh to bounding box coordinates. x1, y1, x2, y2 = x, y, x + w, y + h # cv2 image - frame = pcl_msg_to_cv2(pcl_msg) + frame = pcl_to_cv2(pcl) # Compute mask from bounding box mask = np.zeros(shape=frame.shape[:2]) mask[y1:y2, x1:x2] = 255 # bounding box dimensions @@ -78,27 +71,10 @@ def bb_to_centroid(pcl_msg, x, y, w, h): return np.full(3, np.nan) # Unpack pointcloud into xyz array - pcl_xyz = rnp.point_cloud2.pointcloud2_to_xyz_array(pcl_msg, remove_nans=False) + pcl_xyz = rnp.point_cloud2.pointcloud2_to_xyz_array(pcl, remove_nans=False) # Extract points of interest xyz_points = [pcl_xyz[x][y] for x, y in indices] # Get mean (centroid) point. 
return np.nanmean(xyz_points, axis=0) - - -def euclidian(a, b): - ''' - Get the Euclidean distance between two positions - ''' - return math.sqrt( - math.pow(a.x - b.x, 2) + - math.pow(a.y - b.y, 2) + - math.pow(a.z - b.z, 2) - ) -def euclidian_distance(p1, p2): - x1, y1 = p1 - x2, y2 = p2 - a = np.array((x1, y1)) - b = np.array((x2, y2)) - return np.linalg.norm(a - b) diff --git a/legacy/markers/CMakeLists.txt b/common/helpers/markers/CMakeLists.txt similarity index 97% rename from legacy/markers/CMakeLists.txt rename to common/helpers/markers/CMakeLists.txt index eb31859f0..cdde01d73 100644 --- a/legacy/markers/CMakeLists.txt +++ b/common/helpers/markers/CMakeLists.txt @@ -8,11 +8,9 @@ project(markers) ## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz) ## is used, also find other catkin packages find_package(catkin REQUIRED COMPONENTS - geometry_msgs - message_generation - roscpp rospy - std_msgs + visualization_msgs + geometry_msgs ) ## System dependencies are found with CMake's conventions @@ -22,7 +20,7 @@ find_package(catkin REQUIRED COMPONENTS ## Uncomment this if the package has a setup.py. This macro ensures ## modules and global scripts declared therein get installed ## See http://ros.org/doc/api/catkin/html/user_guide/setup_dot_py.html - catkin_python_setup() +catkin_python_setup() ################################################ ## Declare ROS messages, services and actions ## @@ -72,7 +70,7 @@ find_package(catkin REQUIRED COMPONENTS ## Generate added messages and services with any dependencies listed here # generate_messages( # DEPENDENCIES -# geometry_msgs# std_msgs +# visualization_msgs # ) ################################################ @@ -107,7 +105,7 @@ find_package(catkin REQUIRED COMPONENTS catkin_package( # INCLUDE_DIRS include # LIBRARIES markers -# CATKIN_DEPENDS geometry_msgs message_generation roscpp rospy std_msgs +# CATKIN_DEPENDS rospy visualization_msgs # DEPENDS system_lib ) diff --git a/legacy/markers/package.xml b/common/helpers/markers/package.xml similarity index 87% rename from legacy/markers/package.xml rename to common/helpers/markers/package.xml index 2fcc15ea2..dc51b8f73 100644 --- a/legacy/markers/package.xml +++ b/common/helpers/markers/package.xml @@ -7,13 +7,13 @@ - nicole + Jared Swift - TODO + MIT @@ -49,19 +49,15 @@ catkin - geometry_msgs - message_generation - roscpp rospy - std_msgs - geometry_msgs - roscpp + visualization_msgs + geometry_msgs rospy - std_msgs - geometry_msgs - roscpp + visualization_msgs + geometry_msgs rospy - std_msgs + visualization_msgs + geometry_msgs diff --git a/legacy/listen_module/setup.py b/common/helpers/markers/setup.py similarity index 50% rename from legacy/listen_module/setup.py rename to common/helpers/markers/setup.py index e38e9fd5e..69e7398b0 100644 --- a/legacy/listen_module/setup.py +++ b/common/helpers/markers/setup.py @@ -3,9 +3,6 @@ from distutils.core import setup from catkin_pkg.python_setup import generate_distutils_setup -setup_args = generate_distutils_setup( - packages=['listen_module'], - package_dir={'': 'src'} -) +setup_args = generate_distutils_setup(packages=["markers"], package_dir={"": "src"}) -setup(**setup_args) \ No newline at end of file +setup(**setup_args) diff --git a/common/helpers/markers/src/markers/__init__.py b/common/helpers/markers/src/markers/__init__.py new file mode 100644 index 000000000..aaa58214e --- /dev/null +++ b/common/helpers/markers/src/markers/__init__.py @@ -0,0 +1,52 @@ +import rospy + +from visualization_msgs.msg import Marker +from 
geometry_msgs.msg import PointStamped + +from collections import defaultdict + +from typing import Union + +publisher_counts = defaultdict(int) + + +def create_marker( + point_stamped: PointStamped, + idx: int, + r: float = 0.0, + g: float = 1.0, + b: float = 0.0, +): + marker_msg = Marker() + marker_msg.header.frame_id = point_stamped.header.frame_id + marker_msg.header.stamp = point_stamped.header.stamp + marker_msg.id = idx + marker_msg.type = Marker.SPHERE + marker_msg.action = Marker.ADD + marker_msg.pose.position = point_stamped.point + marker_msg.pose.orientation.w = 1.0 + marker_msg.scale.x = 0.1 + marker_msg.scale.y = 0.1 + marker_msg.scale.z = 0.1 + marker_msg.color.a = 1.0 + marker_msg.color.r = r + marker_msg.color.g = g + marker_msg.color.b = b + return marker_msg + + +def create_and_publish_marker( + publisher: rospy.Publisher, + point_stamped: PointStamped, + idx: Union[int, None] = None, + r: float = 0.0, + g: float = 1.0, + b: float = 0.0, +): + if idx is None: + global publisher_counts + idx = publisher_counts[publisher] + publisher_counts[publisher] += 1 + + marker_msg = create_marker(point_stamped, idx, r, g, b) + publisher.publish(marker_msg) diff --git a/common/helpers/tf_module/CMakeLists.txt b/common/helpers/tf_module/CMakeLists.txt deleted file mode 100644 index 4867b5a6f..000000000 --- a/common/helpers/tf_module/CMakeLists.txt +++ /dev/null @@ -1,212 +0,0 @@ -cmake_minimum_required(VERSION 3.0.2) -project(tf_module) - -## Compile as C++11, supported in ROS Kinetic and newer -# add_compile_options(-std=c++11) - -## Find catkin macros and libraries -## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz) -## is used, also find other catkin packages -find_package(catkin REQUIRED COMPONENTS - geometry_msgs - message_generation - roscpp - rospy - sensor_msgs - std_msgs -) - -## System dependencies are found with CMake's conventions -# find_package(Boost REQUIRED COMPONENTS system) - - -## Uncomment this if the package has a setup.py. This macro ensures -## modules and global scripts declared therein get installed -## See http://ros.org/doc/api/catkin/html/user_guide/setup_dot_py.html -catkin_python_setup() - -################################################ -## Declare ROS messages, services and actions ## -################################################ - -## To declare and build messages, services or actions from within this -## package, follow these steps: -## * Let MSG_DEP_SET be the set of packages whose message types you use in -## your messages/services/actions (e.g. std_msgs, actionlib_msgs, ...). -## * In the file package.xml: -## * add a build_depend tag for "message_generation" -## * add a build_depend and a exec_depend tag for each package in MSG_DEP_SET -## * If MSG_DEP_SET isn't empty the following dependency has been pulled in -## but can be declared for certainty nonetheless: -## * add a exec_depend tag for "message_runtime" -## * In this file (CMakeLists.txt): -## * add "message_generation" and every package in MSG_DEP_SET to -## find_package(catkin REQUIRED COMPONENTS ...) -## * add "message_runtime" and every package in MSG_DEP_SET to -## catkin_package(CATKIN_DEPENDS ...) -## * uncomment the add_*_files sections below as needed -## and list every .msg/.srv/.action file to be processed -## * uncomment the generate_messages entry below -## * add every package in MSG_DEP_SET to generate_messages(DEPENDENCIES ...) 
- -## Generate messages in the 'msg' folder -# add_message_files( -# FILES -# Message1.msg -# Message2.msg -# ) - -## Generate services in the 'srv' folder - add_service_files( - FILES - TfTransform.srv - BaseTransform.srv - LatestTransform.srv - ApplyTransform.srv - ) - -## Generate actions in the 'action' folder -# add_action_files( -# FILES -# Action1.action -# Action2.action -# ) - -## Generate added messages and services with any dependencies listed here - generate_messages( - DEPENDENCIES - geometry_msgs sensor_msgs std_msgs - ) - -################################################ -## Declare ROS dynamic reconfigure parameters ## -################################################ - -## To declare and build dynamic reconfigure parameters within this -## package, follow these steps: -## * In the file package.xml: -## * add a build_depend and a exec_depend tag for "dynamic_reconfigure" -## * In this file (CMakeLists.txt): -## * add "dynamic_reconfigure" to -## find_package(catkin REQUIRED COMPONENTS ...) -## * uncomment the "generate_dynamic_reconfigure_options" section below -## and list every .cfg file to be processed - -## Generate dynamic reconfigure parameters in the 'cfg' folder -# generate_dynamic_reconfigure_options( -# cfg/DynReconf1.cfg -# cfg/DynReconf2.cfg -# ) - -################################### -## catkin specific configuration ## -################################### -## The catkin_package macro generates cmake config files for your package -## Declare things to be passed to dependent projects -## INCLUDE_DIRS: uncomment this if your package contains header files -## LIBRARIES: libraries you create in this project that dependent projects also need -## CATKIN_DEPENDS: catkin_packages dependent projects also need -## DEPENDS: system dependencies of this project that dependent projects also need -catkin_package( -# INCLUDE_DIRS include -# LIBRARIES tf_module -# CATKIN_DEPENDS geometry_msgs message_generation roscpp rospy senson_msgs std_msgs -# DEPENDS system_lib -) - -########### -## Build ## -########### - -## Specify additional locations of header files -## Your package locations should be listed before other locations -include_directories( -# include - ${catkin_INCLUDE_DIRS} -) - -## Declare a C++ library -# add_library(${PROJECT_NAME} -# src/${PROJECT_NAME}/tf_module.cpp -# ) - -## Add cmake target dependencies of the library -## as an example, code may need to be generated before libraries -## either from message generation or dynamic reconfigure -# add_dependencies(${PROJECT_NAME} ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS}) - -## Declare a C++ executable -## With catkin_make all packages are built within a single CMake context -## The recommended prefix ensures that target names across packages don't collide -# add_executable(${PROJECT_NAME}_node src/tf_module_node.cpp) - -## Rename C++ executable without prefix -## The above recommended prefix causes long target names, the following renames the -## target back to the shorter version for ease of user use -## e.g. 
"rosrun someones_pkg node" instead of "rosrun someones_pkg someones_pkg_node" -# set_target_properties(${PROJECT_NAME}_node PROPERTIES OUTPUT_NAME node PREFIX "") - -## Add cmake target dependencies of the executable -## same as for the library above -# add_dependencies(${PROJECT_NAME}_node ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS}) - -## Specify libraries to link a library or executable target against -# target_link_libraries(${PROJECT_NAME}_node -# ${catkin_LIBRARIES} -# ) - -############# -## Install ## -############# - -# all install targets should use catkin DESTINATION variables -# See http://ros.org/doc/api/catkin/html/adv_user_guide/variables.html - -## Mark executable scripts (Python etc.) for installation -## in contrast to setup.py, you can choose the destination -#catkin_install_python(PROGRAMS - # src/tf_module/tf_transforms.py - #DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} -#) - - -## Mark executables for installation -## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_executables.html -# install(TARGETS ${PROJECT_NAME}_node -# RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} -# ) - -## Mark libraries for installation -## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_libraries.html -# install(TARGETS ${PROJECT_NAME} -# ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} -# LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} -# RUNTIME DESTINATION ${CATKIN_GLOBAL_BIN_DESTINATION} -# ) - -## Mark cpp header files for installation -# install(DIRECTORY include/${PROJECT_NAME}/ -# DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION} -# FILES_MATCHING PATTERN "*.h" -# PATTERN ".svn" EXCLUDE -# ) - -## Mark other files for installation (e.g. launch and bag files, etc.) -# install(FILES -# # myfile1 -# # myfile2 -# DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION} -# ) - -############# -## Testing ## -############# - -## Add gtest based cpp test target and link libraries -# catkin_add_gtest(${PROJECT_NAME}-test test/test_tf_module.cpp) -# if(TARGET ${PROJECT_NAME}-test) -# target_link_libraries(${PROJECT_NAME}-test ${PROJECT_NAME}) -# endif() - -## Add folders to be run by python nosetests -# catkin_add_nosetests(test) diff --git a/common/helpers/tf_module/README.md b/common/helpers/tf_module/README.md deleted file mode 100644 index 0ee548b5a..000000000 --- a/common/helpers/tf_module/README.md +++ /dev/null @@ -1,123 +0,0 @@ -# tf_module - -The tf_module package - -This package is maintained by: -- [nicole](mailto:nicole@todo.todo) - -## Prerequisites - -This package depends on the following ROS packages: -- geometry_msgs (build) -- message_generation (build) -- roscpp (build) -- rospy (build) -- sensor_msgs (build) -- std_msgs (build) -- geometry_msgs (exec) -- roscpp (exec) -- rospy (exec) -- sensor_msgs (exec) -- std_msgs (exec) -- catkin (buildtool) - -Ask the package maintainer to write or create a blank `doc/PREREQUISITES.md` for their package! - -## Usage - -Ask the package maintainer to write a `doc/USAGE.md` for their package! - -## Example - -Ask the package maintainer to write a `doc/EXAMPLE.md` for their package! - -## Technical Overview - -Ask the package maintainer to write a `doc/TECHNICAL.md` for their package! - -## ROS Definitions - -### Launch Files - -#### `tf_transforms` - -No description provided. - - -### Messages - -This package has no messages. 
- -### Services - -#### `ApplyTransform` - -Request - -| Field | Type | Description | -|:-:|:-:|---| -| points | geometry_msgs/Point[] | point we want to transform | -| transform | geometry_msgs/TransformStamped | the transform we want to use | - -Response - -| Field | Type | Description | -|:-:|:-:|---| -| new_points | geometry_msgs/Point[] | the transformed point | - -#### `TfTransform` - -Request - -| Field | Type | Description | -|:-:|:-:|---| -| pose_array | geometry_msgs/PoseArray | | -| pointcloud | sensor_msgs/PointCloud2 | | -| point | geometry_msgs/PointStamped | | -| target_frame | std_msgs/String | | -| source_frame | std_msgs/String | | - -Response - -| Field | Type | Description | -|:-:|:-:|---| -| target_pose_array | geometry_msgs/PoseArray | | -| target_pointcloud | sensor_msgs/PointCloud2 | | -| target_point | geometry_msgs/PointStamped | | -| translation_and_rotation | geometry_msgs/Pose | | - -#### `LatestTransform` - -Request - -| Field | Type | Description | -|:-:|:-:|---| -| from_frame | string | source frame | -| target_frame | string | target frame | - -Response - -| Field | Type | Description | -|:-:|:-:|---| -| transform | geometry_msgs/TransformStamped | transform | - -#### `BaseTransform` - -Request - -| Field | Type | Description | -|:-:|:-:|---| -| points | geometry_msgs/Point[] | point we want to transform | -| frame | std_msgs/String | the frame of the point | -| target_frame | std_msgs/String | the frame we want to transform the point to | - -Response - -| Field | Type | Description | -|:-:|:-:|---| -| new_points | geometry_msgs/PointStamped[] | the transformed point | - - -### Actions - -This package has no actions. diff --git a/common/helpers/tf_module/launch/tf_transforms.launch b/common/helpers/tf_module/launch/tf_transforms.launch deleted file mode 100644 index bc8f94bc0..000000000 --- a/common/helpers/tf_module/launch/tf_transforms.launch +++ /dev/null @@ -1,3 +0,0 @@ - - - diff --git a/common/helpers/tf_module/src/tf_module/__init__.py b/common/helpers/tf_module/src/tf_module/__init__.py deleted file mode 100644 index 644396d55..000000000 --- a/common/helpers/tf_module/src/tf_module/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# from .tf_transforms_base import * \ No newline at end of file diff --git a/common/helpers/tf_module/src/tf_module/tf_transforms_base.py b/common/helpers/tf_module/src/tf_module/tf_transforms_base.py deleted file mode 100755 index 5176ef063..000000000 --- a/common/helpers/tf_module/src/tf_module/tf_transforms_base.py +++ /dev/null @@ -1,221 +0,0 @@ -#!/usr/bin/env python -import rospy -import tf2_ros -import tf2_geometry_msgs -import tf2_sensor_msgs - -from geometry_msgs.msg import PoseStamped, PoseArray, Pose, TransformStamped -from tf_module.srv import TfTransform, TfTransformResponse, LatestTransform, LatestTransformResponse, ApplyTransform, \ - ApplyTransformResponse, BaseTransform, BaseTransformResponse - -from geometry_msgs.msg import PoseStamped, Vector3Stamped, PointStamped, WrenchStamped -import PyKDL -import rospy -import tf2_ros -import copy - - -def tf_transform(msg): - print(msg) - tf_response = TfTransformResponse() - if msg.pose_array.header.frame_id != '': - transformation = get_transform(source_frame=msg.pose_array.header.frame_id, target_frame=msg.target_frame.data) - if transformation: - pose_array = PoseArray() - pose_array.header.frame_id = msg.target_frame.data - pose_array.header.stamp = rospy.get_rostime() - - for pose in msg.pose_array.poses: - new_pose = 
tf2_geometry_msgs.do_transform_pose(PoseStamped(pose=pose), transformation).pose - pose_array.poses.append(new_pose) - - tf_response.target_pose_array = pose_array - else: - print('Error: No transformation') - if msg.pointcloud.header.frame_id != '': - transformation = get_transform(source_frame=msg.pointcloud.header.frame_id, target_frame=msg.target_frame.data, stamp=rospy.Time(0)) - if transformation: - new_pointcloud = tf2_sensor_msgs.do_transform_cloud(msg.pointcloud, transformation) - tf_response.target_pointcloud = new_pointcloud - else: - print('Error: No trasnformation') - if msg.point.header.frame_id != '': - transformation = get_transform(source_frame=msg.point.header.frame_id, target_frame=msg.target_frame.data, stamp=rospy.Time(0)) - if transformation: - try: - new_point = do_transform_point(msg.point, transformation) - tf_response.target_point = new_point - except Exception as e: - print(e) - print("---") - else: - print('Error: No trasnformation') - - p = None - if msg.source_frame.data != '': - transformation = get_transform(source_frame=msg.source_frame.data, target_frame=msg.target_frame.data, stamp=rospy.Time(0)) - print(transformation, ' theirs') - # return [t.x, t.y, t.z], [r.x, r.y, r.z, r.w] - lt_trans, lt_rot = lookupTransform(target_frame=msg.target_frame.data, source_frame=msg.source_frame.data, - time=rospy.Time(0)) - print(lt_trans, lt_rot, ' mine ') - p = Pose() - # return [t.x, t.y, t.z], [r.x, r.y, r.z, r.w] - p.position.x = lt_trans[0] - p.position.y = lt_trans[1] - p.position.z = lt_trans[2] - p.orientation.x = lt_rot[0] - p.orientation.y = lt_rot[1] - p.orientation.z = lt_rot[2] - p.orientation.w = lt_rot[3] - tf_response.translation_and_rotation = p - - return tf_response - - -def strip_leading_slash(s): - return s[1:] if s.startswith("/") else s - - -def lookupTransform(target_frame=None, source_frame=None, time=None): - assert (source_frame and target_frame) - try: - msg = tfBuffer.lookup_transform(strip_leading_slash(target_frame), strip_leading_slash(source_frame), time) - t = msg.transform.translation - r = msg.transform.rotation - return [t.x, t.y, t.z], [r.x, r.y, r.z, r.w] - except (tf2_ros.LookupException, tf2_ros.ConnectivityException, tf2_ros.ExtrapolationException) as e: - print(e) - return None - - -# KDL stuff -def do_transform_point(point, transform): - p = transform_to_kdl(transform) * PyKDL.Vector(point.point.x, point.point.y, point.point.z) - res = PointStamped() - res.point.x = p[0] - res.point.y = p[1] - res.point.z = p[2] - res.header = transform.header - return res - - -tf2_ros.TransformRegistration().add(PointStamped, do_transform_point) - - -def transform_to_kdl(t): - return PyKDL.Frame(PyKDL.Rotation.Quaternion(t.transform.rotation.x, t.transform.rotation.y, - t.transform.rotation.z, t.transform.rotation.w), - PyKDL.Vector(t.transform.translation.x, - t.transform.translation.y, - t.transform.translation.z)) - - -# KDL stuff end - -# base functions for transformations -def transform_point_from_given(point, from_frame="base_laser_link", to_frame="map"): - try: - tr = get_transform(from_frame, to_frame, rospy.Time(0)) - new_point = apply_transform(point, tr, is_input_ps=False) - except (tf2_ros.LookupException, tf2_ros.ConnectivityException, tf2_ros.ExtrapolationException) as e: - rospy.logwarn("Failed to transform point.") - rospy.logwarn(e) - new_point = PointStamped() - - return new_point - - -def apply_transform(ps_, transform, is_input_ps=True, target="xtion_rgb_optical_frame"): - """ - Apply transform of PointStamped or Point, 
with a given transform - """ - # assert (ps_ and transform) - if is_input_ps: - tr_point = do_transform_point(ps_, transform) - else: - ps = PointStamped() - ps.point = ps_ - try: - tr_point = do_transform_point(ps, transform) - except Exception as e: - rospy.logwarn("Failed to transform point.") - rospy.logwarn(e) - print(transform) - return PointStamped() - - return tr_point - - -def get_transform(source_frame, target_frame, stamp): - """ - Converts to target frame - Returns the pose in the target frame - """ - # assert (source_frame and target_frame) - try: - transformation = tfBuffer.lookup_transform(target_frame, source_frame, stamp, rospy.Duration(0.1)) - except (tf2_ros.LookupException, tf2_ros.ConnectivityException, tf2_ros.ExtrapolationException) as e: - print(e) - transformation = TransformStamped() - - return transformation - -# base functions for transformations - -# callbacks -def transform_point_cb(msg): - """ - Simply transform a point given the point and the frame - """ - points = msg.points - from_frame = msg.frame.data - to_frame = msg.target_frame.data - - new_points = [] - for p in points: - new_point = transform_point_from_given(p, from_frame, to_frame) - new_points.append(new_point) - return BaseTransformResponse(new_points) - - -def apply_transform_cb(msg): - """ - The callback to apply transform - """ - rospy.logwarn('Applying transform') - new_p = [] - for p in msg.points: - p_tf = apply_transform(p, msg.transform, is_input_ps=False) - new_p.append(p_tf.point) - return ApplyTransformResponse(new_p) - - -def get_latest_transform_cb(msg): - """ - The callback to get the latest transform - """ - to_frame = msg.target_frame - from_frame = msg.from_frame - t = TransformStamped() - try: - t = get_transform(source_frame=from_frame, target_frame=to_frame, stamp=rospy.Time(0)) - except (tf2_ros.LookupException, tf2_ros.ConnectivityException, tf2_ros.ExtrapolationException) as e: - print(e) - - return LatestTransformResponse(t) - -# callbacks end - -if __name__ == '__main__': - rospy.init_node('tf_transform_node') - s = rospy.Service('tf_transform', TfTransform, tf_transform) - s2 = rospy.Service('latest_transform', LatestTransform, get_latest_transform_cb) - s3 = rospy.Service('apply_transform', ApplyTransform, apply_transform_cb) - s4 = rospy.Service('base_transform', BaseTransform, transform_point_cb) - - tfBuffer = tf2_ros.Buffer() - listener = tf2_ros.TransformListener(tfBuffer) - - rospy.loginfo('Ready to transform!') - rospy.spin() diff --git a/common/helpers/tf_module/src/tf_module/transformations.py b/common/helpers/tf_module/src/tf_module/transformations.py deleted file mode 100755 index 8872cbe84..000000000 --- a/common/helpers/tf_module/src/tf_module/transformations.py +++ /dev/null @@ -1,1705 +0,0 @@ -# -*- coding: utf-8 -*- -# transformations.py - -# Copyright (c) 2006, Christoph Gohlke -# Copyright (c) 2006-2009, The Regents of the University of California -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution. 
-# * Neither the name of the copyright holders nor the names of any -# contributors may be used to endorse or promote products derived -# from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE -# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -# POSSIBILITY OF SUCH DAMAGE. - -"""Homogeneous Transformation Matrices and Quaternions. - -A library for calculating 4x4 matrices for translating, rotating, reflecting, -scaling, shearing, projecting, orthogonalizing, and superimposing arrays of -3D homogeneous coordinates as well as for converting between rotation matrices, -Euler angles, and quaternions. Also includes an Arcball control object and -functions to decompose transformation matrices. - -:Authors: - `Christoph Gohlke `__, - Laboratory for Fluorescence Dynamics, University of California, Irvine - -:Version: 20090418 - -Requirements ------------- - -* `Python 2.6 `__ -* `Numpy 1.3 `__ -* `transformations.c 20090418 `__ - (optional implementation of some functions in C) - -Notes ------ - -Matrices (M) can be inverted using numpy.linalg.inv(M), concatenated using -numpy.dot(M0, M1), or used to transform homogeneous coordinates (v) using -numpy.dot(M, v) for shape (4, \*) "point of arrays", respectively -numpy.dot(v, M.T) for shape (\*, 4) "array of points". - -Calculations are carried out with numpy.float64 precision. - -This Python implementation is not optimized for speed. - -Vector, point, quaternion, and matrix function arguments are expected to be -"array like", i.e. tuple, list, or numpy arrays. - -Return types are numpy arrays unless specified otherwise. - -Angles are in radians unless specified otherwise. - -Quaternions ix+jy+kz+w are represented as [x, y, z, w]. - -Use the transpose of transformation matrices for OpenGL glMultMatrixd(). - -A triple of Euler angles can be applied/interpreted in 24 ways, which can -be specified using a 4 character string or encoded 4-tuple: - - *Axes 4-string*: e.g. 'sxyz' or 'ryxy' - - - first character : rotations are applied to 's'tatic or 'r'otating frame - - remaining characters : successive rotation axis 'x', 'y', or 'z' - - *Axes 4-tuple*: e.g. (0, 0, 0, 0) or (1, 1, 1, 1) - - - inner axis: code of axis ('x':0, 'y':1, 'z':2) of rightmost matrix. - - parity : even (0) if inner axis 'x' is followed by 'y', 'y' is followed - by 'z', or 'z' is followed by 'x'. Otherwise odd (1). - - repetition : first and last axis are same (1) or different (0). - - frame : rotations are applied to static (0) or rotating (1) frame. - -References ----------- - -(1) Matrices and transformations. Ronald Goldman. - In "Graphics Gems I", pp 472-475. Morgan Kaufmann, 1990. -(2) More matrices and transformations: shear and pseudo-perspective. - Ronald Goldman. In "Graphics Gems II", pp 320-323. Morgan Kaufmann, 1991. 
-(3) Decomposing a matrix into simple transformations. Spencer Thomas. - In "Graphics Gems II", pp 320-323. Morgan Kaufmann, 1991. -(4) Recovering the data from the transformation matrix. Ronald Goldman. - In "Graphics Gems II", pp 324-331. Morgan Kaufmann, 1991. -(5) Euler angle conversion. Ken Shoemake. - In "Graphics Gems IV", pp 222-229. Morgan Kaufmann, 1994. -(6) Arcball rotation control. Ken Shoemake. - In "Graphics Gems IV", pp 175-192. Morgan Kaufmann, 1994. -(7) Representing attitude: Euler angles, unit quaternions, and rotation - vectors. James Diebel. 2006. -(8) A discussion of the solution for the best rotation to relate two sets - of vectors. W Kabsch. Acta Cryst. 1978. A34, 827-828. -(9) Closed-form solution of absolute orientation using unit quaternions. - BKP Horn. J Opt Soc Am A. 1987. 4(4), 629-642. -(10) Quaternions. Ken Shoemake. - http://www.sfu.ca/~jwa3/cmpt461/files/quatut.pdf -(11) From quaternion to matrix and back. JMP van Waveren. 2005. - http://www.intel.com/cd/ids/developer/asmo-na/eng/293748.htm -(12) Uniform random rotations. Ken Shoemake. - In "Graphics Gems III", pp 124-132. Morgan Kaufmann, 1992. - - -Examples --------- - ->>> alpha, beta, gamma = 0.123, -1.234, 2.345 ->>> origin, xaxis, yaxis, zaxis = (0, 0, 0), (1, 0, 0), (0, 1, 0), (0, 0, 1) ->>> I = identity_matrix() ->>> Rx = rotation_matrix(alpha, xaxis) ->>> Ry = rotation_matrix(beta, yaxis) ->>> Rz = rotation_matrix(gamma, zaxis) ->>> R = concatenate_matrices(Rx, Ry, Rz) ->>> euler = euler_from_matrix(R, 'rxyz') ->>> numpy.allclose([alpha, beta, gamma], euler) -True ->>> Re = euler_matrix(alpha, beta, gamma, 'rxyz') ->>> is_same_transform(R, Re) -True ->>> al, be, ga = euler_from_matrix(Re, 'rxyz') ->>> is_same_transform(Re, euler_matrix(al, be, ga, 'rxyz')) -True ->>> qx = quaternion_about_axis(alpha, xaxis) ->>> qy = quaternion_about_axis(beta, yaxis) ->>> qz = quaternion_about_axis(gamma, zaxis) ->>> q = quaternion_multiply(qx, qy) ->>> q = quaternion_multiply(q, qz) ->>> Rq = quaternion_matrix(q) ->>> is_same_transform(R, Rq) -True ->>> S = scale_matrix(1.23, origin) ->>> T = translation_matrix((1, 2, 3)) ->>> Z = shear_matrix(beta, xaxis, origin, zaxis) ->>> R = random_rotation_matrix(numpy.random.rand(3)) ->>> M = concatenate_matrices(T, R, Z, S) ->>> scale, shear, angles, trans, persp = decompose_matrix(M) ->>> numpy.allclose(scale, 1.23) -True ->>> numpy.allclose(trans, (1, 2, 3)) -True ->>> numpy.allclose(shear, (0, math.tan(beta), 0)) -True ->>> is_same_transform(R, euler_matrix(axes='sxyz', *angles)) -True ->>> M1 = compose_matrix(scale, shear, angles, trans, persp) ->>> is_same_transform(M, M1) -True - -""" - -from __future__ import division - -import warnings -import math - -import numpy - -# Documentation in HTML format can be generated with Epydoc -__docformat__ = "restructuredtext en" - - -def identity_matrix(): - """Return 4x4 identity/unit matrix. - - >>> I = identity_matrix() - >>> numpy.allclose(I, numpy.dot(I, I)) - True - >>> numpy.sum(I), numpy.trace(I) - (4.0, 4.0) - >>> numpy.allclose(I, numpy.identity(4, dtype=numpy.float64)) - True - - """ - return numpy.identity(4, dtype=numpy.float64) - - -def translation_matrix(direction): - """Return matrix to translate by direction vector. - - >>> v = numpy.random.random(3) - 0.5 - >>> numpy.allclose(v, translation_matrix(v)[:3, 3]) - True - - """ - M = numpy.identity(4) - M[:3, 3] = direction[:3] - return M - - -def translation_from_matrix(matrix): - """Return translation vector from translation matrix. 
- - >>> v0 = numpy.random.random(3) - 0.5 - >>> v1 = translation_from_matrix(translation_matrix(v0)) - >>> numpy.allclose(v0, v1) - True - - """ - return numpy.array(matrix, copy=False)[:3, 3].copy() - - -def reflection_matrix(point, normal): - """Return matrix to mirror at plane defined by point and normal vector. - - >>> v0 = numpy.random.random(4) - 0.5 - >>> v0[3] = 1.0 - >>> v1 = numpy.random.random(3) - 0.5 - >>> R = reflection_matrix(v0, v1) - >>> numpy.allclose(2., numpy.trace(R)) - True - >>> numpy.allclose(v0, numpy.dot(R, v0)) - True - >>> v2 = v0.copy() - >>> v2[:3] += v1 - >>> v3 = v0.copy() - >>> v2[:3] -= v1 - >>> numpy.allclose(v2, numpy.dot(R, v3)) - True - - """ - normal = unit_vector(normal[:3]) - M = numpy.identity(4) - M[:3, :3] -= 2.0 * numpy.outer(normal, normal) - M[:3, 3] = (2.0 * numpy.dot(point[:3], normal)) * normal - return M - - -def reflection_from_matrix(matrix): - """Return mirror plane point and normal vector from reflection matrix. - - >>> v0 = numpy.random.random(3) - 0.5 - >>> v1 = numpy.random.random(3) - 0.5 - >>> M0 = reflection_matrix(v0, v1) - >>> point, normal = reflection_from_matrix(M0) - >>> M1 = reflection_matrix(point, normal) - >>> is_same_transform(M0, M1) - True - - """ - M = numpy.array(matrix, dtype=numpy.float64, copy=False) - # normal: unit eigenvector corresponding to eigenvalue -1 - l, V = numpy.linalg.eig(M[:3, :3]) - i = numpy.where(abs(numpy.real(l) + 1.0) < 1e-8)[0] - if not len(i): - raise ValueError("no unit eigenvector corresponding to eigenvalue -1") - normal = numpy.real(V[:, i[0]]).squeeze() - # point: any unit eigenvector corresponding to eigenvalue 1 - l, V = numpy.linalg.eig(M) - i = numpy.where(abs(numpy.real(l) - 1.0) < 1e-8)[0] - if not len(i): - raise ValueError("no unit eigenvector corresponding to eigenvalue 1") - point = numpy.real(V[:, i[-1]]).squeeze() - point /= point[3] - return point, normal - - -def rotation_matrix(angle, direction, point=None): - """Return matrix to rotate about axis defined by point and direction. - - >>> angle = (random.random() - 0.5) * (2*math.pi) - >>> direc = numpy.random.random(3) - 0.5 - >>> point = numpy.random.random(3) - 0.5 - >>> R0 = rotation_matrix(angle, direc, point) - >>> R1 = rotation_matrix(angle-2*math.pi, direc, point) - >>> is_same_transform(R0, R1) - True - >>> R0 = rotation_matrix(angle, direc, point) - >>> R1 = rotation_matrix(-angle, -direc, point) - >>> is_same_transform(R0, R1) - True - >>> I = numpy.identity(4, numpy.float64) - >>> numpy.allclose(I, rotation_matrix(math.pi*2, direc)) - True - >>> numpy.allclose(2., numpy.trace(rotation_matrix(math.pi/2, - ... direc, point))) - True - - """ - sina = math.sin(angle) - cosa = math.cos(angle) - direction = unit_vector(direction[:3]) - # rotation matrix around unit vector - R = numpy.array(((cosa, 0.0, 0.0), - (0.0, cosa, 0.0), - (0.0, 0.0, cosa)), dtype=numpy.float64) - R += numpy.outer(direction, direction) * (1.0 - cosa) - direction *= sina - R += numpy.array((( 0.0, -direction[2], direction[1]), - ( direction[2], 0.0, -direction[0]), - (-direction[1], direction[0], 0.0)), - dtype=numpy.float64) - M = numpy.identity(4) - M[:3, :3] = R - if point is not None: - # rotation not around origin - point = numpy.array(point[:3], dtype=numpy.float64, copy=False) - M[:3, 3] = point - numpy.dot(R, point) - return M - - -def rotation_from_matrix(matrix): - """Return rotation angle and axis from rotation matrix. 
- - >>> angle = (random.random() - 0.5) * (2*math.pi) - >>> direc = numpy.random.random(3) - 0.5 - >>> point = numpy.random.random(3) - 0.5 - >>> R0 = rotation_matrix(angle, direc, point) - >>> angle, direc, point = rotation_from_matrix(R0) - >>> R1 = rotation_matrix(angle, direc, point) - >>> is_same_transform(R0, R1) - True - - """ - R = numpy.array(matrix, dtype=numpy.float64, copy=False) - R33 = R[:3, :3] - # direction: unit eigenvector of R33 corresponding to eigenvalue of 1 - l, W = numpy.linalg.eig(R33.T) - i = numpy.where(abs(numpy.real(l) - 1.0) < 1e-8)[0] - if not len(i): - raise ValueError("no unit eigenvector corresponding to eigenvalue 1") - direction = numpy.real(W[:, i[-1]]).squeeze() - # point: unit eigenvector of R33 corresponding to eigenvalue of 1 - l, Q = numpy.linalg.eig(R) - i = numpy.where(abs(numpy.real(l) - 1.0) < 1e-8)[0] - if not len(i): - raise ValueError("no unit eigenvector corresponding to eigenvalue 1") - point = numpy.real(Q[:, i[-1]]).squeeze() - point /= point[3] - # rotation angle depending on direction - cosa = (numpy.trace(R33) - 1.0) / 2.0 - if abs(direction[2]) > 1e-8: - sina = (R[1, 0] + (cosa-1.0)*direction[0]*direction[1]) / direction[2] - elif abs(direction[1]) > 1e-8: - sina = (R[0, 2] + (cosa-1.0)*direction[0]*direction[2]) / direction[1] - else: - sina = (R[2, 1] + (cosa-1.0)*direction[1]*direction[2]) / direction[0] - angle = math.atan2(sina, cosa) - return angle, direction, point - - -def scale_matrix(factor, origin=None, direction=None): - """Return matrix to scale by factor around origin in direction. - - Use factor -1 for point symmetry. - - >>> v = (numpy.random.rand(4, 5) - 0.5) * 20.0 - >>> v[3] = 1.0 - >>> S = scale_matrix(-1.234) - >>> numpy.allclose(numpy.dot(S, v)[:3], -1.234*v[:3]) - True - >>> factor = random.random() * 10 - 5 - >>> origin = numpy.random.random(3) - 0.5 - >>> direct = numpy.random.random(3) - 0.5 - >>> S = scale_matrix(factor, origin) - >>> S = scale_matrix(factor, origin, direct) - - """ - if direction is None: - # uniform scaling - M = numpy.array(((factor, 0.0, 0.0, 0.0), - (0.0, factor, 0.0, 0.0), - (0.0, 0.0, factor, 0.0), - (0.0, 0.0, 0.0, 1.0)), dtype=numpy.float64) - if origin is not None: - M[:3, 3] = origin[:3] - M[:3, 3] *= 1.0 - factor - else: - # nonuniform scaling - direction = unit_vector(direction[:3]) - factor = 1.0 - factor - M = numpy.identity(4) - M[:3, :3] -= factor * numpy.outer(direction, direction) - if origin is not None: - M[:3, 3] = (factor * numpy.dot(origin[:3], direction)) * direction - return M - - -def scale_from_matrix(matrix): - """Return scaling factor, origin and direction from scaling matrix. 
- - >>> factor = random.random() * 10 - 5 - >>> origin = numpy.random.random(3) - 0.5 - >>> direct = numpy.random.random(3) - 0.5 - >>> S0 = scale_matrix(factor, origin) - >>> factor, origin, direction = scale_from_matrix(S0) - >>> S1 = scale_matrix(factor, origin, direction) - >>> is_same_transform(S0, S1) - True - >>> S0 = scale_matrix(factor, origin, direct) - >>> factor, origin, direction = scale_from_matrix(S0) - >>> S1 = scale_matrix(factor, origin, direction) - >>> is_same_transform(S0, S1) - True - - """ - M = numpy.array(matrix, dtype=numpy.float64, copy=False) - M33 = M[:3, :3] - factor = numpy.trace(M33) - 2.0 - try: - # direction: unit eigenvector corresponding to eigenvalue factor - l, V = numpy.linalg.eig(M33) - i = numpy.where(abs(numpy.real(l) - factor) < 1e-8)[0][0] - direction = numpy.real(V[:, i]).squeeze() - direction /= vector_norm(direction) - except IndexError: - # uniform scaling - factor = (factor + 2.0) / 3.0 - direction = None - # origin: any eigenvector corresponding to eigenvalue 1 - l, V = numpy.linalg.eig(M) - i = numpy.where(abs(numpy.real(l) - 1.0) < 1e-8)[0] - if not len(i): - raise ValueError("no eigenvector corresponding to eigenvalue 1") - origin = numpy.real(V[:, i[-1]]).squeeze() - origin /= origin[3] - return factor, origin, direction - - -def projection_matrix(point, normal, direction=None, - perspective=None, pseudo=False): - """Return matrix to project onto plane defined by point and normal. - - Using either perspective point, projection direction, or none of both. - - If pseudo is True, perspective projections will preserve relative depth - such that Perspective = dot(Orthogonal, PseudoPerspective). - - >>> P = projection_matrix((0, 0, 0), (1, 0, 0)) - >>> numpy.allclose(P[1:, 1:], numpy.identity(4)[1:, 1:]) - True - >>> point = numpy.random.random(3) - 0.5 - >>> normal = numpy.random.random(3) - 0.5 - >>> direct = numpy.random.random(3) - 0.5 - >>> persp = numpy.random.random(3) - 0.5 - >>> P0 = projection_matrix(point, normal) - >>> P1 = projection_matrix(point, normal, direction=direct) - >>> P2 = projection_matrix(point, normal, perspective=persp) - >>> P3 = projection_matrix(point, normal, perspective=persp, pseudo=True) - >>> is_same_transform(P2, numpy.dot(P0, P3)) - True - >>> P = projection_matrix((3, 0, 0), (1, 1, 0), (1, 0, 0)) - >>> v0 = (numpy.random.rand(4, 5) - 0.5) * 20.0 - >>> v0[3] = 1.0 - >>> v1 = numpy.dot(P, v0) - >>> numpy.allclose(v1[1], v0[1]) - True - >>> numpy.allclose(v1[0], 3.0-v1[1]) - True - - """ - M = numpy.identity(4) - point = numpy.array(point[:3], dtype=numpy.float64, copy=False) - normal = unit_vector(normal[:3]) - if perspective is not None: - # perspective projection - perspective = numpy.array(perspective[:3], dtype=numpy.float64, - copy=False) - M[0, 0] = M[1, 1] = M[2, 2] = numpy.dot(perspective-point, normal) - M[:3, :3] -= numpy.outer(perspective, normal) - if pseudo: - # preserve relative depth - M[:3, :3] -= numpy.outer(normal, normal) - M[:3, 3] = numpy.dot(point, normal) * (perspective+normal) - else: - M[:3, 3] = numpy.dot(point, normal) * perspective - M[3, :3] = -normal - M[3, 3] = numpy.dot(perspective, normal) - elif direction is not None: - # parallel projection - direction = numpy.array(direction[:3], dtype=numpy.float64, copy=False) - scale = numpy.dot(direction, normal) - M[:3, :3] -= numpy.outer(direction, normal) / scale - M[:3, 3] = direction * (numpy.dot(point, normal) / scale) - else: - # orthogonal projection - M[:3, :3] -= numpy.outer(normal, normal) - M[:3, 3] = numpy.dot(point, 
normal) * normal - return M - - -def projection_from_matrix(matrix, pseudo=False): - """Return projection plane and perspective point from projection matrix. - - Return values are same as arguments for projection_matrix function: - point, normal, direction, perspective, and pseudo. - - >>> point = numpy.random.random(3) - 0.5 - >>> normal = numpy.random.random(3) - 0.5 - >>> direct = numpy.random.random(3) - 0.5 - >>> persp = numpy.random.random(3) - 0.5 - >>> P0 = projection_matrix(point, normal) - >>> result = projection_from_matrix(P0) - >>> P1 = projection_matrix(*result) - >>> is_same_transform(P0, P1) - True - >>> P0 = projection_matrix(point, normal, direct) - >>> result = projection_from_matrix(P0) - >>> P1 = projection_matrix(*result) - >>> is_same_transform(P0, P1) - True - >>> P0 = projection_matrix(point, normal, perspective=persp, pseudo=False) - >>> result = projection_from_matrix(P0, pseudo=False) - >>> P1 = projection_matrix(*result) - >>> is_same_transform(P0, P1) - True - >>> P0 = projection_matrix(point, normal, perspective=persp, pseudo=True) - >>> result = projection_from_matrix(P0, pseudo=True) - >>> P1 = projection_matrix(*result) - >>> is_same_transform(P0, P1) - True - - """ - M = numpy.array(matrix, dtype=numpy.float64, copy=False) - M33 = M[:3, :3] - l, V = numpy.linalg.eig(M) - i = numpy.where(abs(numpy.real(l) - 1.0) < 1e-8)[0] - if not pseudo and len(i): - # point: any eigenvector corresponding to eigenvalue 1 - point = numpy.real(V[:, i[-1]]).squeeze() - point /= point[3] - # direction: unit eigenvector corresponding to eigenvalue 0 - l, V = numpy.linalg.eig(M33) - i = numpy.where(abs(numpy.real(l)) < 1e-8)[0] - if not len(i): - raise ValueError("no eigenvector corresponding to eigenvalue 0") - direction = numpy.real(V[:, i[0]]).squeeze() - direction /= vector_norm(direction) - # normal: unit eigenvector of M33.T corresponding to eigenvalue 0 - l, V = numpy.linalg.eig(M33.T) - i = numpy.where(abs(numpy.real(l)) < 1e-8)[0] - if len(i): - # parallel projection - normal = numpy.real(V[:, i[0]]).squeeze() - normal /= vector_norm(normal) - return point, normal, direction, None, False - else: - # orthogonal projection, where normal equals direction vector - return point, direction, None, None, False - else: - # perspective projection - i = numpy.where(abs(numpy.real(l)) > 1e-8)[0] - if not len(i): - raise ValueError( - "no eigenvector not corresponding to eigenvalue 0") - point = numpy.real(V[:, i[-1]]).squeeze() - point /= point[3] - normal = - M[3, :3] - perspective = M[:3, 3] / numpy.dot(point[:3], normal) - if pseudo: - perspective -= normal - return point, normal, None, perspective, pseudo - - -def clip_matrix(left, right, bottom, top, near, far, perspective=False): - """Return matrix to obtain normalized device coordinates from frustrum. - - The frustrum bounds are axis-aligned along x (left, right), - y (bottom, top) and z (near, far). - - Normalized device coordinates are in range [-1, 1] if coordinates are - inside the frustrum. - - If perspective is True the frustrum is a truncated pyramid with the - perspective point at origin and direction along z axis, otherwise an - orthographic canonical view volume (a box). - - Homogeneous coordinates transformed by the perspective clip matrix - need to be dehomogenized (devided by w coordinate). 
- - >>> frustrum = numpy.random.rand(6) - >>> frustrum[1] += frustrum[0] - >>> frustrum[3] += frustrum[2] - >>> frustrum[5] += frustrum[4] - >>> M = clip_matrix(*frustrum, perspective=False) - >>> numpy.dot(M, [frustrum[0], frustrum[2], frustrum[4], 1.0]) - array([-1., -1., -1., 1.]) - >>> numpy.dot(M, [frustrum[1], frustrum[3], frustrum[5], 1.0]) - array([ 1., 1., 1., 1.]) - >>> M = clip_matrix(*frustrum, perspective=True) - >>> v = numpy.dot(M, [frustrum[0], frustrum[2], frustrum[4], 1.0]) - >>> v / v[3] - array([-1., -1., -1., 1.]) - >>> v = numpy.dot(M, [frustrum[1], frustrum[3], frustrum[4], 1.0]) - >>> v / v[3] - array([ 1., 1., -1., 1.]) - - """ - if left >= right or bottom >= top or near >= far: - raise ValueError("invalid frustrum") - if perspective: - if near <= _EPS: - raise ValueError("invalid frustrum: near <= 0") - t = 2.0 * near - M = ((-t/(right-left), 0.0, (right+left)/(right-left), 0.0), - (0.0, -t/(top-bottom), (top+bottom)/(top-bottom), 0.0), - (0.0, 0.0, -(far+near)/(far-near), t*far/(far-near)), - (0.0, 0.0, -1.0, 0.0)) - else: - M = ((2.0/(right-left), 0.0, 0.0, (right+left)/(left-right)), - (0.0, 2.0/(top-bottom), 0.0, (top+bottom)/(bottom-top)), - (0.0, 0.0, 2.0/(far-near), (far+near)/(near-far)), - (0.0, 0.0, 0.0, 1.0)) - return numpy.array(M, dtype=numpy.float64) - - -def shear_matrix(angle, direction, point, normal): - """Return matrix to shear by angle along direction vector on shear plane. - - The shear plane is defined by a point and normal vector. The direction - vector must be orthogonal to the plane's normal vector. - - A point P is transformed by the shear matrix into P" such that - the vector P-P" is parallel to the direction vector and its extent is - given by the angle of P-P'-P", where P' is the orthogonal projection - of P onto the shear plane. - - >>> angle = (random.random() - 0.5) * 4*math.pi - >>> direct = numpy.random.random(3) - 0.5 - >>> point = numpy.random.random(3) - 0.5 - >>> normal = numpy.cross(direct, numpy.random.random(3)) - >>> S = shear_matrix(angle, direct, point, normal) - >>> numpy.allclose(1.0, numpy.linalg.det(S)) - True - - """ - normal = unit_vector(normal[:3]) - direction = unit_vector(direction[:3]) - if abs(numpy.dot(normal, direction)) > 1e-6: - raise ValueError("direction and normal vectors are not orthogonal") - angle = math.tan(angle) - M = numpy.identity(4) - M[:3, :3] += angle * numpy.outer(direction, normal) - M[:3, 3] = -angle * numpy.dot(point[:3], normal) * direction - return M - - -def shear_from_matrix(matrix): - """Return shear angle, direction and plane from shear matrix. 
- - >>> angle = (random.random() - 0.5) * 4*math.pi - >>> direct = numpy.random.random(3) - 0.5 - >>> point = numpy.random.random(3) - 0.5 - >>> normal = numpy.cross(direct, numpy.random.random(3)) - >>> S0 = shear_matrix(angle, direct, point, normal) - >>> angle, direct, point, normal = shear_from_matrix(S0) - >>> S1 = shear_matrix(angle, direct, point, normal) - >>> is_same_transform(S0, S1) - True - - """ - M = numpy.array(matrix, dtype=numpy.float64, copy=False) - M33 = M[:3, :3] - # normal: cross independent eigenvectors corresponding to the eigenvalue 1 - l, V = numpy.linalg.eig(M33) - i = numpy.where(abs(numpy.real(l) - 1.0) < 1e-4)[0] - if len(i) < 2: - raise ValueError("No two linear independent eigenvectors found {}".format(l)) - V = numpy.real(V[:, i]).squeeze().T - lenorm = -1.0 - for i0, i1 in ((0, 1), (0, 2), (1, 2)): - n = numpy.cross(V[i0], V[i1]) - l = vector_norm(n) - if l > lenorm: - lenorm = l - normal = n - normal /= lenorm - # direction and angle - direction = numpy.dot(M33 - numpy.identity(3), normal) - angle = vector_norm(direction) - direction /= angle - angle = math.atan(angle) - # point: eigenvector corresponding to eigenvalue 1 - l, V = numpy.linalg.eig(M) - i = numpy.where(abs(numpy.real(l) - 1.0) < 1e-8)[0] - if not len(i): - raise ValueError("no eigenvector corresponding to eigenvalue 1") - point = numpy.real(V[:, i[-1]]).squeeze() - point /= point[3] - return angle, direction, point, normal - - -def decompose_matrix(matrix): - """Return sequence of transformations from transformation matrix. - - matrix : array_like - Non-degenerative homogeneous transformation matrix - - Return tuple of: - scale : vector of 3 scaling factors - shear : list of shear factors for x-y, x-z, y-z axes - angles : list of Euler angles about static x, y, z axes - translate : translation vector along x, y, z axes - perspective : perspective partition of matrix - - Raise ValueError if matrix is of wrong type or degenerative. 
- - >>> T0 = translation_matrix((1, 2, 3)) - >>> scale, shear, angles, trans, persp = decompose_matrix(T0) - >>> T1 = translation_matrix(trans) - >>> numpy.allclose(T0, T1) - True - >>> S = scale_matrix(0.123) - >>> scale, shear, angles, trans, persp = decompose_matrix(S) - >>> scale[0] - 0.123 - >>> R0 = euler_matrix(1, 2, 3) - >>> scale, shear, angles, trans, persp = decompose_matrix(R0) - >>> R1 = euler_matrix(*angles) - >>> numpy.allclose(R0, R1) - True - - """ - M = numpy.array(matrix, dtype=numpy.float64, copy=True).T - if abs(M[3, 3]) < _EPS: - raise ValueError("M[3, 3] is zero") - M /= M[3, 3] - P = M.copy() - P[:, 3] = 0, 0, 0, 1 - if not numpy.linalg.det(P): - raise ValueError("Matrix is singular") - - scale = numpy.zeros((3, ), dtype=numpy.float64) - shear = [0, 0, 0] - angles = [0, 0, 0] - - if any(abs(M[:3, 3]) > _EPS): - perspective = numpy.dot(M[:, 3], numpy.linalg.inv(P.T)) - M[:, 3] = 0, 0, 0, 1 - else: - perspective = numpy.array((0, 0, 0, 1), dtype=numpy.float64) - - translate = M[3, :3].copy() - M[3, :3] = 0 - - row = M[:3, :3].copy() - scale[0] = vector_norm(row[0]) - row[0] /= scale[0] - shear[0] = numpy.dot(row[0], row[1]) - row[1] -= row[0] * shear[0] - scale[1] = vector_norm(row[1]) - row[1] /= scale[1] - shear[0] /= scale[1] - shear[1] = numpy.dot(row[0], row[2]) - row[2] -= row[0] * shear[1] - shear[2] = numpy.dot(row[1], row[2]) - row[2] -= row[1] * shear[2] - scale[2] = vector_norm(row[2]) - row[2] /= scale[2] - shear[1:] /= scale[2] - - if numpy.dot(row[0], numpy.cross(row[1], row[2])) < 0: - scale *= -1 - row *= -1 - - angles[1] = math.asin(-row[0, 2]) - if math.cos(angles[1]): - angles[0] = math.atan2(row[1, 2], row[2, 2]) - angles[2] = math.atan2(row[0, 1], row[0, 0]) - else: - #angles[0] = math.atan2(row[1, 0], row[1, 1]) - angles[0] = math.atan2(-row[2, 1], row[1, 1]) - angles[2] = 0.0 - - return scale, shear, angles, translate, perspective - - -def compose_matrix(scale=None, shear=None, angles=None, translate=None, - perspective=None): - """Return transformation matrix from sequence of transformations. - - This is the inverse of the decompose_matrix function. 
- - Sequence of transformations: - scale : vector of 3 scaling factors - shear : list of shear factors for x-y, x-z, y-z axes - angles : list of Euler angles about static x, y, z axes - translate : translation vector along x, y, z axes - perspective : perspective partition of matrix - - >>> scale = numpy.random.random(3) - 0.5 - >>> shear = numpy.random.random(3) - 0.5 - >>> angles = (numpy.random.random(3) - 0.5) * (2*math.pi) - >>> trans = numpy.random.random(3) - 0.5 - >>> persp = numpy.random.random(4) - 0.5 - >>> M0 = compose_matrix(scale, shear, angles, trans, persp) - >>> result = decompose_matrix(M0) - >>> M1 = compose_matrix(*result) - >>> is_same_transform(M0, M1) - True - - """ - M = numpy.identity(4) - if perspective is not None: - P = numpy.identity(4) - P[3, :] = perspective[:4] - M = numpy.dot(M, P) - if translate is not None: - T = numpy.identity(4) - T[:3, 3] = translate[:3] - M = numpy.dot(M, T) - if angles is not None: - R = euler_matrix(angles[0], angles[1], angles[2], 'sxyz') - M = numpy.dot(M, R) - if shear is not None: - Z = numpy.identity(4) - Z[1, 2] = shear[2] - Z[0, 2] = shear[1] - Z[0, 1] = shear[0] - M = numpy.dot(M, Z) - if scale is not None: - S = numpy.identity(4) - S[0, 0] = scale[0] - S[1, 1] = scale[1] - S[2, 2] = scale[2] - M = numpy.dot(M, S) - M /= M[3, 3] - return M - - -def orthogonalization_matrix(lengths, angles): - """Return orthogonalization matrix for crystallographic cell coordinates. - - Angles are expected in degrees. - - The de-orthogonalization matrix is the inverse. - - >>> O = orthogonalization_matrix((10., 10., 10.), (90., 90., 90.)) - >>> numpy.allclose(O[:3, :3], numpy.identity(3, float) * 10) - True - >>> O = orthogonalization_matrix([9.8, 12.0, 15.5], [87.2, 80.7, 69.7]) - >>> numpy.allclose(numpy.sum(O), 43.063229) - True - - """ - a, b, c = lengths - angles = numpy.radians(angles) - sina, sinb, _ = numpy.sin(angles) - cosa, cosb, cosg = numpy.cos(angles) - co = (cosa * cosb - cosg) / (sina * sinb) - return numpy.array(( - ( a*sinb*math.sqrt(1.0-co*co), 0.0, 0.0, 0.0), - (-a*sinb*co, b*sina, 0.0, 0.0), - ( a*cosb, b*cosa, c, 0.0), - ( 0.0, 0.0, 0.0, 1.0)), - dtype=numpy.float64) - - -def superimposition_matrix(v0, v1, scaling=False, usesvd=True): - """Return matrix to transform given vector set into second vector set. - - v0 and v1 are shape (3, \*) or (4, \*) arrays of at least 3 vectors. - - If usesvd is True, the weighted sum of squared deviations (RMSD) is - minimized according to the algorithm by W. Kabsch [8]. Otherwise the - quaternion based algorithm by B. Horn [9] is used (slower when using - this Python implementation). - - The returned matrix performs rotation, translation and uniform scaling - (if specified). 
- - >>> v0 = numpy.random.rand(3, 10) - >>> M = superimposition_matrix(v0, v0) - >>> numpy.allclose(M, numpy.identity(4)) - True - >>> R = random_rotation_matrix(numpy.random.random(3)) - >>> v0 = ((1,0,0), (0,1,0), (0,0,1), (1,1,1)) - >>> v1 = numpy.dot(R, v0) - >>> M = superimposition_matrix(v0, v1) - >>> numpy.allclose(v1, numpy.dot(M, v0)) - True - >>> v0 = (numpy.random.rand(4, 100) - 0.5) * 20.0 - >>> v0[3] = 1.0 - >>> v1 = numpy.dot(R, v0) - >>> M = superimposition_matrix(v0, v1) - >>> numpy.allclose(v1, numpy.dot(M, v0)) - True - >>> S = scale_matrix(random.random()) - >>> T = translation_matrix(numpy.random.random(3)-0.5) - >>> M = concatenate_matrices(T, R, S) - >>> v1 = numpy.dot(M, v0) - >>> v0[:3] += numpy.random.normal(0.0, 1e-9, 300).reshape(3, -1) - >>> M = superimposition_matrix(v0, v1, scaling=True) - >>> numpy.allclose(v1, numpy.dot(M, v0)) - True - >>> M = superimposition_matrix(v0, v1, scaling=True, usesvd=False) - >>> numpy.allclose(v1, numpy.dot(M, v0)) - True - >>> v = numpy.empty((4, 100, 3), dtype=numpy.float64) - >>> v[:, :, 0] = v0 - >>> M = superimposition_matrix(v0, v1, scaling=True, usesvd=False) - >>> numpy.allclose(v1, numpy.dot(M, v[:, :, 0])) - True - - """ - v0 = numpy.array(v0, dtype=numpy.float64, copy=False)[:3] - v1 = numpy.array(v1, dtype=numpy.float64, copy=False)[:3] - - if v0.shape != v1.shape or v0.shape[1] < 3: - raise ValueError("Vector sets are of wrong shape or type.") - - # move centroids to origin - t0 = numpy.mean(v0, axis=1) - t1 = numpy.mean(v1, axis=1) - v0 = v0 - t0.reshape(3, 1) - v1 = v1 - t1.reshape(3, 1) - - if usesvd: - # Singular Value Decomposition of covariance matrix - u, s, vh = numpy.linalg.svd(numpy.dot(v1, v0.T)) - # rotation matrix from SVD orthonormal bases - R = numpy.dot(u, vh) - if numpy.linalg.det(R) < 0.0: - # R does not constitute right handed system - R -= numpy.outer(u[:, 2], vh[2, :]*2.0) - s[-1] *= -1.0 - # homogeneous transformation matrix - M = numpy.identity(4) - M[:3, :3] = R - else: - # compute symmetric matrix N - xx, yy, zz = numpy.sum(v0 * v1, axis=1) - xy, yz, zx = numpy.sum(v0 * numpy.roll(v1, -1, axis=0), axis=1) - xz, yx, zy = numpy.sum(v0 * numpy.roll(v1, -2, axis=0), axis=1) - N = ((xx+yy+zz, yz-zy, zx-xz, xy-yx), - (yz-zy, xx-yy-zz, xy+yx, zx+xz), - (zx-xz, xy+yx, -xx+yy-zz, yz+zy), - (xy-yx, zx+xz, yz+zy, -xx-yy+zz)) - # quaternion: eigenvector corresponding to most positive eigenvalue - l, V = numpy.linalg.eig(N) - q = V[:, numpy.argmax(l)] - q /= vector_norm(q) # unit quaternion - q = numpy.roll(q, -1) # move w component to end - # homogeneous transformation matrix - M = quaternion_matrix(q) - - # scale: ratio of rms deviations from centroid - if scaling: - v0 *= v0 - v1 *= v1 - M[:3, :3] *= math.sqrt(numpy.sum(v1) / numpy.sum(v0)) - - # translation - M[:3, 3] = t1 - T = numpy.identity(4) - T[:3, 3] = -t0 - M = numpy.dot(M, T) - return M - - -def euler_matrix(ai, aj, ak, axes='sxyz'): - """Return homogeneous rotation matrix from Euler angles and axis sequence. - - ai, aj, ak : Euler's roll, pitch and yaw angles - axes : One of 24 axis sequences as string or encoded tuple - - >>> R = euler_matrix(1, 2, 3, 'syxz') - >>> numpy.allclose(numpy.sum(R[0]), -1.34786452) - True - >>> R = euler_matrix(1, 2, 3, (0, 1, 0, 1)) - >>> numpy.allclose(numpy.sum(R[0]), -0.383436184) - True - >>> ai, aj, ak = (4.0*math.pi) * (numpy.random.random(3) - 0.5) - >>> for axes in _AXES2TUPLE.keys(): - ... R = euler_matrix(ai, aj, ak, axes) - >>> for axes in _TUPLE2AXES.keys(): - ... 
R = euler_matrix(ai, aj, ak, axes) - - """ - try: - firstaxis, parity, repetition, frame = _AXES2TUPLE[axes] - except (AttributeError, KeyError): - _ = _TUPLE2AXES[axes] - firstaxis, parity, repetition, frame = axes - - i = firstaxis - j = _NEXT_AXIS[i+parity] - k = _NEXT_AXIS[i-parity+1] - - if frame: - ai, ak = ak, ai - if parity: - ai, aj, ak = -ai, -aj, -ak - - si, sj, sk = math.sin(ai), math.sin(aj), math.sin(ak) - ci, cj, ck = math.cos(ai), math.cos(aj), math.cos(ak) - cc, cs = ci*ck, ci*sk - sc, ss = si*ck, si*sk - - M = numpy.identity(4) - if repetition: - M[i, i] = cj - M[i, j] = sj*si - M[i, k] = sj*ci - M[j, i] = sj*sk - M[j, j] = -cj*ss+cc - M[j, k] = -cj*cs-sc - M[k, i] = -sj*ck - M[k, j] = cj*sc+cs - M[k, k] = cj*cc-ss - else: - M[i, i] = cj*ck - M[i, j] = sj*sc-cs - M[i, k] = sj*cc+ss - M[j, i] = cj*sk - M[j, j] = sj*ss+cc - M[j, k] = sj*cs-sc - M[k, i] = -sj - M[k, j] = cj*si - M[k, k] = cj*ci - return M - - -def euler_from_matrix(matrix, axes='sxyz'): - """Return Euler angles from rotation matrix for specified axis sequence. - - axes : One of 24 axis sequences as string or encoded tuple - - Note that many Euler angle triplets can describe one matrix. - - >>> R0 = euler_matrix(1, 2, 3, 'syxz') - >>> al, be, ga = euler_from_matrix(R0, 'syxz') - >>> R1 = euler_matrix(al, be, ga, 'syxz') - >>> numpy.allclose(R0, R1) - True - >>> angles = (4.0*math.pi) * (numpy.random.random(3) - 0.5) - >>> for axes in _AXES2TUPLE.keys(): - ... R0 = euler_matrix(axes=axes, *angles) - ... R1 = euler_matrix(axes=axes, *euler_from_matrix(R0, axes)) - ... if not numpy.allclose(R0, R1): print axes, "failed" - - """ - try: - firstaxis, parity, repetition, frame = _AXES2TUPLE[axes.lower()] - except (AttributeError, KeyError): - _ = _TUPLE2AXES[axes] - firstaxis, parity, repetition, frame = axes - - i = firstaxis - j = _NEXT_AXIS[i+parity] - k = _NEXT_AXIS[i-parity+1] - - M = numpy.array(matrix, dtype=numpy.float64, copy=False)[:3, :3] - if repetition: - sy = math.sqrt(M[i, j]*M[i, j] + M[i, k]*M[i, k]) - if sy > _EPS: - ax = math.atan2( M[i, j], M[i, k]) - ay = math.atan2( sy, M[i, i]) - az = math.atan2( M[j, i], -M[k, i]) - else: - ax = math.atan2(-M[j, k], M[j, j]) - ay = math.atan2( sy, M[i, i]) - az = 0.0 - else: - cy = math.sqrt(M[i, i]*M[i, i] + M[j, i]*M[j, i]) - if cy > _EPS: - ax = math.atan2( M[k, j], M[k, k]) - ay = math.atan2(-M[k, i], cy) - az = math.atan2( M[j, i], M[i, i]) - else: - ax = math.atan2(-M[j, k], M[j, j]) - ay = math.atan2(-M[k, i], cy) - az = 0.0 - - if parity: - ax, ay, az = -ax, -ay, -az - if frame: - ax, az = az, ax - return ax, ay, az - - -def euler_from_quaternion(quaternion, axes='sxyz'): - """Return Euler angles from quaternion for specified axis sequence. - - >>> angles = euler_from_quaternion([0.06146124, 0, 0, 0.99810947]) - >>> numpy.allclose(angles, [0.123, 0, 0]) - True - - """ - return euler_from_matrix(quaternion_matrix(quaternion), axes) - - -def quaternion_from_euler(ai, aj, ak, axes='sxyz'): - """Return quaternion from Euler angles and axis sequence. 
- - ai, aj, ak : Euler's roll, pitch and yaw angles - axes : One of 24 axis sequences as string or encoded tuple - - >>> q = quaternion_from_euler(1, 2, 3, 'ryxz') - >>> numpy.allclose(q, [0.310622, -0.718287, 0.444435, 0.435953]) - True - - """ - try: - firstaxis, parity, repetition, frame = _AXES2TUPLE[axes.lower()] - except (AttributeError, KeyError): - _ = _TUPLE2AXES[axes] - firstaxis, parity, repetition, frame = axes - - i = firstaxis - j = _NEXT_AXIS[i+parity] - k = _NEXT_AXIS[i-parity+1] - - if frame: - ai, ak = ak, ai - if parity: - aj = -aj - - ai /= 2.0 - aj /= 2.0 - ak /= 2.0 - ci = math.cos(ai) - si = math.sin(ai) - cj = math.cos(aj) - sj = math.sin(aj) - ck = math.cos(ak) - sk = math.sin(ak) - cc = ci*ck - cs = ci*sk - sc = si*ck - ss = si*sk - - quaternion = numpy.empty((4, ), dtype=numpy.float64) - if repetition: - quaternion[i] = cj*(cs + sc) - quaternion[j] = sj*(cc + ss) - quaternion[k] = sj*(cs - sc) - quaternion[3] = cj*(cc - ss) - else: - quaternion[i] = cj*sc - sj*cs - quaternion[j] = cj*ss + sj*cc - quaternion[k] = cj*cs - sj*sc - quaternion[3] = cj*cc + sj*ss - if parity: - quaternion[j] *= -1 - - return quaternion - - -def quaternion_about_axis(angle, axis): - """Return quaternion for rotation about axis. - - >>> q = quaternion_about_axis(0.123, (1, 0, 0)) - >>> numpy.allclose(q, [0.06146124, 0, 0, 0.99810947]) - True - - """ - quaternion = numpy.zeros((4, ), dtype=numpy.float64) - quaternion[:3] = axis[:3] - qlen = vector_norm(quaternion) - if qlen > _EPS: - quaternion *= math.sin(angle/2.0) / qlen - quaternion[3] = math.cos(angle/2.0) - return quaternion - - -def quaternion_matrix(quaternion): - """Return homogeneous rotation matrix from quaternion. - - >>> R = quaternion_matrix([0.06146124, 0, 0, 0.99810947]) - >>> numpy.allclose(R, rotation_matrix(0.123, (1, 0, 0))) - True - - """ - q = numpy.array(quaternion[:4], dtype=numpy.float64, copy=True) - nq = numpy.dot(q, q) - if nq < _EPS: - return numpy.identity(4) - q *= math.sqrt(2.0 / nq) - q = numpy.outer(q, q) - return numpy.array(( - (1.0-q[1, 1]-q[2, 2], q[0, 1]-q[2, 3], q[0, 2]+q[1, 3], 0.0), - ( q[0, 1]+q[2, 3], 1.0-q[0, 0]-q[2, 2], q[1, 2]-q[0, 3], 0.0), - ( q[0, 2]-q[1, 3], q[1, 2]+q[0, 3], 1.0-q[0, 0]-q[1, 1], 0.0), - ( 0.0, 0.0, 0.0, 1.0) - ), dtype=numpy.float64) - - -def quaternion_from_matrix(matrix): - """Return quaternion from rotation matrix. - - >>> R = rotation_matrix(0.123, (1, 2, 3)) - >>> q = quaternion_from_matrix(R) - >>> numpy.allclose(q, [0.0164262, 0.0328524, 0.0492786, 0.9981095]) - True - - """ - q = numpy.empty((4, ), dtype=numpy.float64) - M = numpy.array(matrix, dtype=numpy.float64, copy=False)[:4, :4] - t = numpy.trace(M) - if t > M[3, 3]: - q[3] = t - q[2] = M[1, 0] - M[0, 1] - q[1] = M[0, 2] - M[2, 0] - q[0] = M[2, 1] - M[1, 2] - else: - i, j, k = 0, 1, 2 - if M[1, 1] > M[0, 0]: - i, j, k = 1, 2, 0 - if M[2, 2] > M[i, i]: - i, j, k = 2, 0, 1 - t = M[i, i] - (M[j, j] + M[k, k]) + M[3, 3] - q[i] = t - q[j] = M[i, j] + M[j, i] - q[k] = M[k, i] + M[i, k] - q[3] = M[k, j] - M[j, k] - q *= 0.5 / math.sqrt(t * M[3, 3]) - return q - - -def quaternion_multiply(quaternion1, quaternion0): - """Return multiplication of two quaternions. 
- - >>> q = quaternion_multiply([1, -2, 3, 4], [-5, 6, 7, 8]) - >>> numpy.allclose(q, [-44, -14, 48, 28]) - True - - """ - x0, y0, z0, w0 = quaternion0 - x1, y1, z1, w1 = quaternion1 - return numpy.array(( - x1*w0 + y1*z0 - z1*y0 + w1*x0, - -x1*z0 + y1*w0 + z1*x0 + w1*y0, - x1*y0 - y1*x0 + z1*w0 + w1*z0, - -x1*x0 - y1*y0 - z1*z0 + w1*w0), dtype=numpy.float64) - - -def quaternion_conjugate(quaternion): - """Return conjugate of quaternion. - - >>> q0 = random_quaternion() - >>> q1 = quaternion_conjugate(q0) - >>> q1[3] == q0[3] and all(q1[:3] == -q0[:3]) - True - - """ - return numpy.array((-quaternion[0], -quaternion[1], - -quaternion[2], quaternion[3]), dtype=numpy.float64) - - -def quaternion_inverse(quaternion): - """Return inverse of quaternion. - - >>> q0 = random_quaternion() - >>> q1 = quaternion_inverse(q0) - >>> numpy.allclose(quaternion_multiply(q0, q1), [0, 0, 0, 1]) - True - - """ - return quaternion_conjugate(quaternion) / numpy.dot(quaternion, quaternion) - - -def quaternion_slerp(quat0, quat1, fraction, spin=0, shortestpath=True): - """Return spherical linear interpolation between two quaternions. - - >>> q0 = random_quaternion() - >>> q1 = random_quaternion() - >>> q = quaternion_slerp(q0, q1, 0.0) - >>> numpy.allclose(q, q0) - True - >>> q = quaternion_slerp(q0, q1, 1.0, 1) - >>> numpy.allclose(q, q1) - True - >>> q = quaternion_slerp(q0, q1, 0.5) - >>> angle = math.acos(numpy.dot(q0, q)) - >>> numpy.allclose(2.0, math.acos(numpy.dot(q0, q1)) / angle) or \ - numpy.allclose(2.0, math.acos(-numpy.dot(q0, q1)) / angle) - True - - """ - q0 = unit_vector(quat0[:4]) - q1 = unit_vector(quat1[:4]) - if fraction == 0.0: - return q0 - elif fraction == 1.0: - return q1 - d = numpy.dot(q0, q1) - if abs(abs(d) - 1.0) < _EPS: - return q0 - if shortestpath and d < 0.0: - # invert rotation - d = -d - q1 *= -1.0 - angle = math.acos(d) + spin * math.pi - if abs(angle) < _EPS: - return q0 - isin = 1.0 / math.sin(angle) - q0 *= math.sin((1.0 - fraction) * angle) * isin - q1 *= math.sin(fraction * angle) * isin - q0 += q1 - return q0 - - -def random_quaternion(rand=None): - """Return uniform random unit quaternion. - - rand: array like or None - Three independent random variables that are uniformly distributed - between 0 and 1. - - >>> q = random_quaternion() - >>> numpy.allclose(1.0, vector_norm(q)) - True - >>> q = random_quaternion(numpy.random.random(3)) - >>> q.shape - (4,) - - """ - if rand is None: - rand = numpy.random.rand(3) - else: - assert len(rand) == 3 - r1 = numpy.sqrt(1.0 - rand[0]) - r2 = numpy.sqrt(rand[0]) - pi2 = math.pi * 2.0 - t1 = pi2 * rand[1] - t2 = pi2 * rand[2] - return numpy.array((numpy.sin(t1)*r1, - numpy.cos(t1)*r1, - numpy.sin(t2)*r2, - numpy.cos(t2)*r2), dtype=numpy.float64) - - -def random_rotation_matrix(rand=None): - """Return uniform random rotation matrix. - - rnd: array like - Three independent random variables that are uniformly distributed - between 0 and 1 for each returned quaternion. - - >>> R = random_rotation_matrix() - >>> numpy.allclose(numpy.dot(R.T, R), numpy.identity(4)) - True - - """ - return quaternion_matrix(random_quaternion(rand)) - - -class Arcball(object): - """Virtual Trackball Control. 
- - >>> ball = Arcball() - >>> ball = Arcball(initial=numpy.identity(4)) - >>> ball.place([320, 320], 320) - >>> ball.down([500, 250]) - >>> ball.drag([475, 275]) - >>> R = ball.matrix() - >>> numpy.allclose(numpy.sum(R), 3.90583455) - True - >>> ball = Arcball(initial=[0, 0, 0, 1]) - >>> ball.place([320, 320], 320) - >>> ball.setaxes([1,1,0], [-1, 1, 0]) - >>> ball.setconstrain(True) - >>> ball.down([400, 200]) - >>> ball.drag([200, 400]) - >>> R = ball.matrix() - >>> numpy.allclose(numpy.sum(R), 0.2055924) - True - >>> ball.next() - - """ - - def __init__(self, initial=None): - """Initialize virtual trackball control. - - initial : quaternion or rotation matrix - - """ - self._axis = None - self._axes = None - self._radius = 1.0 - self._center = [0.0, 0.0] - self._vdown = numpy.array([0, 0, 1], dtype=numpy.float64) - self._constrain = False - - if initial is None: - self._qdown = numpy.array([0, 0, 0, 1], dtype=numpy.float64) - else: - initial = numpy.array(initial, dtype=numpy.float64) - if initial.shape == (4, 4): - self._qdown = quaternion_from_matrix(initial) - elif initial.shape == (4, ): - initial /= vector_norm(initial) - self._qdown = initial - else: - raise ValueError("initial not a quaternion or matrix.") - - self._qnow = self._qpre = self._qdown - - def place(self, center, radius): - """Place Arcball, e.g. when window size changes. - - center : sequence[2] - Window coordinates of trackball center. - radius : float - Radius of trackball in window coordinates. - - """ - self._radius = float(radius) - self._center[0] = center[0] - self._center[1] = center[1] - - def setaxes(self, *axes): - """Set axes to constrain rotations.""" - if axes is None: - self._axes = None - else: - self._axes = [unit_vector(axis) for axis in axes] - - def setconstrain(self, constrain): - """Set state of constrain to axis mode.""" - self._constrain = constrain == True - - def getconstrain(self): - """Return state of constrain to axis mode.""" - return self._constrain - - def down(self, point): - """Set initial cursor window coordinates and pick constrain-axis.""" - self._vdown = arcball_map_to_sphere(point, self._center, self._radius) - self._qdown = self._qpre = self._qnow - - if self._constrain and self._axes is not None: - self._axis = arcball_nearest_axis(self._vdown, self._axes) - self._vdown = arcball_constrain_to_axis(self._vdown, self._axis) - else: - self._axis = None - - def drag(self, point): - """Update current cursor window coordinates.""" - vnow = arcball_map_to_sphere(point, self._center, self._radius) - - if self._axis is not None: - vnow = arcball_constrain_to_axis(vnow, self._axis) - - self._qpre = self._qnow - - t = numpy.cross(self._vdown, vnow) - if numpy.dot(t, t) < _EPS: - self._qnow = self._qdown - else: - q = [t[0], t[1], t[2], numpy.dot(self._vdown, vnow)] - self._qnow = quaternion_multiply(q, self._qdown) - - def next(self, acceleration=0.0): - """Continue rotation in direction of last drag.""" - q = quaternion_slerp(self._qpre, self._qnow, 2.0+acceleration, False) - self._qpre, self._qnow = self._qnow, q - - def matrix(self): - """Return homogeneous rotation matrix.""" - return quaternion_matrix(self._qnow) - - -def arcball_map_to_sphere(point, center, radius): - """Return unit sphere coordinates from window coordinates.""" - v = numpy.array(((point[0] - center[0]) / radius, - (center[1] - point[1]) / radius, - 0.0), dtype=numpy.float64) - n = v[0]*v[0] + v[1]*v[1] - if n > 1.0: - v /= math.sqrt(n) # position outside of sphere - else: - v[2] = math.sqrt(1.0 - n) - return v - 
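The removed transformations module above defines the usual Euler/quaternion helpers (`quaternion_from_euler`, `euler_from_matrix`, `quaternion_matrix`, and related functions) using w-last quaternions and axis strings such as `'sxyz'`. Below is a minimal sketch of those conventions, assuming the deleted file were importable as a module named `transformations`; that module name is an assumption, not part of this changeset.

```python
# Minimal sketch, assuming the deleted module is importable as `transformations`.
import numpy
import transformations as tf

# Axis string 'sxyz' = static frame, x-y-z order; quaternions are (x, y, z, w).
q = tf.quaternion_from_euler(0.1, 0.2, 0.3, axes="sxyz")
R = tf.quaternion_matrix(q)  # 4x4 homogeneous rotation built from the quaternion
roll, pitch, yaw = tf.euler_from_matrix(R, axes="sxyz")
assert numpy.allclose((roll, pitch, yaw), (0.1, 0.2, 0.3))
```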
- -def arcball_constrain_to_axis(point, axis): - """Return sphere point perpendicular to axis.""" - v = numpy.array(point, dtype=numpy.float64, copy=True) - a = numpy.array(axis, dtype=numpy.float64, copy=True) - v -= a * numpy.dot(a, v) # on plane - n = vector_norm(v) - if n > _EPS: - if v[2] < 0.0: - v *= -1.0 - v /= n - return v - if a[2] == 1.0: - return numpy.array([1, 0, 0], dtype=numpy.float64) - return unit_vector([-a[1], a[0], 0]) - - -def arcball_nearest_axis(point, axes): - """Return axis, which arc is nearest to point.""" - point = numpy.array(point, dtype=numpy.float64, copy=False) - nearest = None - mx = -1.0 - for axis in axes: - t = numpy.dot(arcball_constrain_to_axis(point, axis), point) - if t > mx: - nearest = axis - mx = t - return nearest - - -# epsilon for testing whether a number is close to zero -_EPS = numpy.finfo(float).eps * 4.0 - -# axis sequences for Euler angles -_NEXT_AXIS = [1, 2, 0, 1] - -# map axes strings to/from tuples of inner axis, parity, repetition, frame -_AXES2TUPLE = { - 'sxyz': (0, 0, 0, 0), 'sxyx': (0, 0, 1, 0), 'sxzy': (0, 1, 0, 0), - 'sxzx': (0, 1, 1, 0), 'syzx': (1, 0, 0, 0), 'syzy': (1, 0, 1, 0), - 'syxz': (1, 1, 0, 0), 'syxy': (1, 1, 1, 0), 'szxy': (2, 0, 0, 0), - 'szxz': (2, 0, 1, 0), 'szyx': (2, 1, 0, 0), 'szyz': (2, 1, 1, 0), - 'rzyx': (0, 0, 0, 1), 'rxyx': (0, 0, 1, 1), 'ryzx': (0, 1, 0, 1), - 'rxzx': (0, 1, 1, 1), 'rxzy': (1, 0, 0, 1), 'ryzy': (1, 0, 1, 1), - 'rzxy': (1, 1, 0, 1), 'ryxy': (1, 1, 1, 1), 'ryxz': (2, 0, 0, 1), - 'rzxz': (2, 0, 1, 1), 'rxyz': (2, 1, 0, 1), 'rzyz': (2, 1, 1, 1)} - -_TUPLE2AXES = dict((v, k) for k, v in _AXES2TUPLE.items()) - -# helper functions - -def vector_norm(data, axis=None, out=None): - """Return length, i.e. eucledian norm, of ndarray along axis. - - >>> v = numpy.random.random(3) - >>> n = vector_norm(v) - >>> numpy.allclose(n, numpy.linalg.norm(v)) - True - >>> v = numpy.random.rand(6, 5, 3) - >>> n = vector_norm(v, axis=-1) - >>> numpy.allclose(n, numpy.sqrt(numpy.sum(v*v, axis=2))) - True - >>> n = vector_norm(v, axis=1) - >>> numpy.allclose(n, numpy.sqrt(numpy.sum(v*v, axis=1))) - True - >>> v = numpy.random.rand(5, 4, 3) - >>> n = numpy.empty((5, 3), dtype=numpy.float64) - >>> vector_norm(v, axis=1, out=n) - >>> numpy.allclose(n, numpy.sqrt(numpy.sum(v*v, axis=1))) - True - >>> vector_norm([]) - 0.0 - >>> vector_norm([1.0]) - 1.0 - - """ - data = numpy.array(data, dtype=numpy.float64, copy=True) - if out is None: - if data.ndim == 1: - return math.sqrt(numpy.dot(data, data)) - data *= data - out = numpy.atleast_1d(numpy.sum(data, axis=axis)) - numpy.sqrt(out, out) - return out - else: - data *= data - numpy.sum(data, axis=axis, out=out) - numpy.sqrt(out, out) - - -def unit_vector(data, axis=None, out=None): - """Return ndarray normalized by length, i.e. eucledian norm, along axis. 
- - >>> v0 = numpy.random.random(3) - >>> v1 = unit_vector(v0) - >>> numpy.allclose(v1, v0 / numpy.linalg.norm(v0)) - True - >>> v0 = numpy.random.rand(5, 4, 3) - >>> v1 = unit_vector(v0, axis=-1) - >>> v2 = v0 / numpy.expand_dims(numpy.sqrt(numpy.sum(v0*v0, axis=2)), 2) - >>> numpy.allclose(v1, v2) - True - >>> v1 = unit_vector(v0, axis=1) - >>> v2 = v0 / numpy.expand_dims(numpy.sqrt(numpy.sum(v0*v0, axis=1)), 1) - >>> numpy.allclose(v1, v2) - True - >>> v1 = numpy.empty((5, 4, 3), dtype=numpy.float64) - >>> unit_vector(v0, axis=1, out=v1) - >>> numpy.allclose(v1, v2) - True - >>> list(unit_vector([])) - [] - >>> list(unit_vector([1.0])) - [1.0] - - """ - if out is None: - data = numpy.array(data, dtype=numpy.float64, copy=True) - if data.ndim == 1: - data /= math.sqrt(numpy.dot(data, data)) - return data - else: - if out is not data: - out[:] = numpy.array(data, copy=False) - data = out - length = numpy.atleast_1d(numpy.sum(data*data, axis)) - numpy.sqrt(length, length) - if axis is not None: - length = numpy.expand_dims(length, axis) - data /= length - if out is None: - return data - - -def random_vector(size): - """Return array of random doubles in the half-open interval [0.0, 1.0). - - >>> v = random_vector(10000) - >>> numpy.all(v >= 0.0) and numpy.all(v < 1.0) - True - >>> v0 = random_vector(10) - >>> v1 = random_vector(10) - >>> numpy.any(v0 == v1) - False - - """ - return numpy.random.random(size) - - -def inverse_matrix(matrix): - """Return inverse of square transformation matrix. - - >>> M0 = random_rotation_matrix() - >>> M1 = inverse_matrix(M0.T) - >>> numpy.allclose(M1, numpy.linalg.inv(M0.T)) - True - >>> for size in range(1, 7): - ... M0 = numpy.random.rand(size, size) - ... M1 = inverse_matrix(M0) - ... if not numpy.allclose(M1, numpy.linalg.inv(M0)): print size - - """ - return numpy.linalg.inv(matrix) - - -def concatenate_matrices(*matrices): - """Return concatenation of series of transformation matrices. - - >>> M = numpy.random.rand(16).reshape((4, 4)) - 0.5 - >>> numpy.allclose(M, concatenate_matrices(M)) - True - >>> numpy.allclose(numpy.dot(M, M.T), concatenate_matrices(M, M.T)) - True - - """ - M = numpy.identity(4) - for i in matrices: - M = numpy.dot(M, i) - return M - - -def is_same_transform(matrix0, matrix1): - """Return True if two matrices perform same transformation. - - >>> is_same_transform(numpy.identity(4), numpy.identity(4)) - True - >>> is_same_transform(numpy.identity(4), random_rotation_matrix()) - False - - """ - matrix0 = numpy.array(matrix0, dtype=numpy.float64, copy=True) - matrix0 /= matrix0[3, 3] - matrix1 = numpy.array(matrix1, dtype=numpy.float64, copy=True) - matrix1 /= matrix1[3, 3] - return numpy.allclose(matrix0, matrix1) - - -def _import_module(module_name, warn=True, prefix='_py_', ignore='_'): - """Try import all public attributes from module into global namespace. - - Existing attributes with name clashes are renamed with prefix. - Attributes starting with underscore are ignored by default. - - Return True on successful import. 
- - """ - try: - module = __import__(module_name) - except ImportError: - if warn: - warnings.warn("Failed to import module " + module_name) - else: - for attr in dir(module): - if ignore and attr.startswith(ignore): - continue - if prefix: - if attr in globals(): - globals()[prefix + attr] = globals()[attr] - elif warn: - warnings.warn("No Python implementation of " + attr) - globals()[attr] = getattr(module, attr) - return True \ No newline at end of file diff --git a/common/helpers/tf_module/srv/ApplyTransform.srv b/common/helpers/tf_module/srv/ApplyTransform.srv deleted file mode 100644 index 2c2299770..000000000 --- a/common/helpers/tf_module/srv/ApplyTransform.srv +++ /dev/null @@ -1,4 +0,0 @@ -geometry_msgs/Point[] points # point we want to transform -geometry_msgs/TransformStamped transform #the transform we want to use ---- -geometry_msgs/Point[] new_points # the transformed point \ No newline at end of file diff --git a/common/helpers/tf_module/srv/BaseTransform.srv b/common/helpers/tf_module/srv/BaseTransform.srv deleted file mode 100644 index 4ed61b6b5..000000000 --- a/common/helpers/tf_module/srv/BaseTransform.srv +++ /dev/null @@ -1,5 +0,0 @@ -geometry_msgs/Point[] points # point we want to transform -std_msgs/String frame # the frame of the point -std_msgs/String target_frame # the frame we want to transform the point to ---- -geometry_msgs/PointStamped[] new_points # the transformed point diff --git a/common/helpers/tf_module/srv/LatestTransform.srv b/common/helpers/tf_module/srv/LatestTransform.srv deleted file mode 100644 index e41175e19..000000000 --- a/common/helpers/tf_module/srv/LatestTransform.srv +++ /dev/null @@ -1,4 +0,0 @@ -string from_frame # source frame -string target_frame # target frame ---- -geometry_msgs/TransformStamped transform # transform \ No newline at end of file diff --git a/common/helpers/tf_module/srv/TfTransform.srv b/common/helpers/tf_module/srv/TfTransform.srv deleted file mode 100644 index 35b0e70fc..000000000 --- a/common/helpers/tf_module/srv/TfTransform.srv +++ /dev/null @@ -1,10 +0,0 @@ -geometry_msgs/PoseArray pose_array -sensor_msgs/PointCloud2 pointcloud -geometry_msgs/PointStamped point -std_msgs/String target_frame -std_msgs/String source_frame ---- -geometry_msgs/PoseArray target_pose_array -sensor_msgs/PointCloud2 target_pointcloud -geometry_msgs/PointStamped target_point -geometry_msgs/Pose translation_and_rotation \ No newline at end of file diff --git a/common/speech/lasr_rasa/package.xml b/common/speech/lasr_rasa/package.xml index f909b5640..29394262f 100644 --- a/common/speech/lasr_rasa/package.xml +++ b/common/speech/lasr_rasa/package.xml @@ -7,7 +7,7 @@ - Jared + Jared Swift diff --git a/common/speech/lasr_rasa/scripts/rasa.sh b/common/speech/lasr_rasa/scripts/rasa.sh deleted file mode 100755 index a8b7aeacd..000000000 --- a/common/speech/lasr_rasa/scripts/rasa.sh +++ /dev/null @@ -1,2 +0,0 @@ -#!/bin/bash -rosrun lasr_rasa rasa run --enable-api -p $1 -m $2 \ No newline at end of file diff --git a/common/speech/lasr_speech/CMakeLists.txt b/common/speech/lasr_speech/CMakeLists.txt deleted file mode 100644 index 4a07fded5..000000000 --- a/common/speech/lasr_speech/CMakeLists.txt +++ /dev/null @@ -1,208 +0,0 @@ -cmake_minimum_required(VERSION 3.0.2) -project(lasr_speech) - -## Compile as C++11, supported in ROS Kinetic and newer -# add_compile_options(-std=c++11) - -## Find catkin macros and libraries -## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz) -## is used, also find other catkin packages 
-find_package(catkin REQUIRED COMPONENTS - rospy - std_msgs - message_generation - lasr_rasa - lasr_speech_recognition_msgs - lasr_speech_recognition_whisper -) - -## System dependencies are found with CMake's conventions -# find_package(Boost REQUIRED COMPONENTS system) - - -## Uncomment this if the package has a setup.py. This macro ensures -## modules and global scripts declared therein get installed -## See http://ros.org/doc/api/catkin/html/user_guide/setup_dot_py.html -# catkin_python_setup() - -################################################ -## Declare ROS messages, services and actions ## -################################################ - -## To declare and build messages, services or actions from within this -## package, follow these steps: -## * Let MSG_DEP_SET be the set of packages whose message types you use in -## your messages/services/actions (e.g. std_msgs, actionlib_msgs, ...). -## * In the file package.xml: -## * add a build_depend tag for "message_generation" -## * add a build_depend and a exec_depend tag for each package in MSG_DEP_SET -## * If MSG_DEP_SET isn't empty the following dependency has been pulled in -## but can be declared for certainty nonetheless: -## * add a exec_depend tag for "message_runtime" -## * In this file (CMakeLists.txt): -## * add "message_generation" and every package in MSG_DEP_SET to -## find_package(catkin REQUIRED COMPONENTS ...) -## * add "message_runtime" and every package in MSG_DEP_SET to -## catkin_package(CATKIN_DEPENDS ...) -## * uncomment the add_*_files sections below as needed -## and list every .msg/.srv/.action file to be processed -## * uncomment the generate_messages entry below -## * add every package in MSG_DEP_SET to generate_messages(DEPENDENCIES ...) - -## Generate messages in the 'msg' folder -# add_message_files( -# FILES -# Message1.msg -# Message2.msg -# ) - -## Generate services in the 'srv' folder -add_service_files( - FILES - Speech.srv -) - -## Generate actions in the 'action' folder -# add_action_files( -# FILES -# Action1.action -# Action2.action -# ) - -## Generate added messages and services with any dependencies listed here -generate_messages( - DEPENDENCIES - std_msgs -) - -################################################ -## Declare ROS dynamic reconfigure parameters ## -################################################ - -## To declare and build dynamic reconfigure parameters within this -## package, follow these steps: -## * In the file package.xml: -## * add a build_depend and a exec_depend tag for "dynamic_reconfigure" -## * In this file (CMakeLists.txt): -## * add "dynamic_reconfigure" to -## find_package(catkin REQUIRED COMPONENTS ...) 
-## * uncomment the "generate_dynamic_reconfigure_options" section below -## and list every .cfg file to be processed - -## Generate dynamic reconfigure parameters in the 'cfg' folder -# generate_dynamic_reconfigure_options( -# cfg/DynReconf1.cfg -# cfg/DynReconf2.cfg -# ) - -################################### -## catkin specific configuration ## -################################### -## The catkin_package macro generates cmake config files for your package -## Declare things to be passed to dependent projects -## INCLUDE_DIRS: uncomment this if your package contains header files -## LIBRARIES: libraries you create in this project that dependent projects also need -## CATKIN_DEPENDS: catkin_packages dependent projects also need -## DEPENDS: system dependencies of this project that dependent projects also need -catkin_package( -# INCLUDE_DIRS include -# LIBRARIES lasr_speech -# CATKIN_DEPENDS lasr_rasa lasr_speech_recognition_msgs lasr_speech_recognition_whisper rospy -# DEPENDS system_lib -) - -########### -## Build ## -########### - -## Specify additional locations of header files -## Your package locations should be listed before other locations -include_directories( -# include - ${catkin_INCLUDE_DIRS} -) - -## Declare a C++ library -# add_library(${PROJECT_NAME} -# src/${PROJECT_NAME}/lasr_speech.cpp -# ) - -## Add cmake target dependencies of the library -## as an example, code may need to be generated before libraries -## either from message generation or dynamic reconfigure -# add_dependencies(${PROJECT_NAME} ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS}) - -## Declare a C++ executable -## With catkin_make all packages are built within a single CMake context -## The recommended prefix ensures that target names across packages don't collide -# add_executable(${PROJECT_NAME}_node src/lasr_speech_node.cpp) - -## Rename C++ executable without prefix -## The above recommended prefix causes long target names, the following renames the -## target back to the shorter version for ease of user use -## e.g. "rosrun someones_pkg node" instead of "rosrun someones_pkg someones_pkg_node" -# set_target_properties(${PROJECT_NAME}_node PROPERTIES OUTPUT_NAME node PREFIX "") - -## Add cmake target dependencies of the executable -## same as for the library above -# add_dependencies(${PROJECT_NAME}_node ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS}) - -## Specify libraries to link a library or executable target against -# target_link_libraries(${PROJECT_NAME}_node -# ${catkin_LIBRARIES} -# ) - -############# -## Install ## -############# - -# all install targets should use catkin DESTINATION variables -# See http://ros.org/doc/api/catkin/html/adv_user_guide/variables.html - -## Mark executable scripts (Python etc.) 
for installation -## in contrast to setup.py, you can choose the destination -# catkin_install_python(PROGRAMS -# scripts/my_python_script -# DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} -# ) - -## Mark executables for installation -## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_executables.html -# install(TARGETS ${PROJECT_NAME}_node -# RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} -# ) - -## Mark libraries for installation -## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_libraries.html -# install(TARGETS ${PROJECT_NAME} -# ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} -# LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} -# RUNTIME DESTINATION ${CATKIN_GLOBAL_BIN_DESTINATION} -# ) - -## Mark cpp header files for installation -# install(DIRECTORY include/${PROJECT_NAME}/ -# DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION} -# FILES_MATCHING PATTERN "*.h" -# PATTERN ".svn" EXCLUDE -# ) - -## Mark other files for installation (e.g. launch and bag files, etc.) -# install(FILES -# # myfile1 -# # myfile2 -# DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION} -# ) - -############# -## Testing ## -############# - -## Add gtest based cpp test target and link libraries -# catkin_add_gtest(${PROJECT_NAME}-test test/test_lasr_speech.cpp) -# if(TARGET ${PROJECT_NAME}-test) -# target_link_libraries(${PROJECT_NAME}-test ${PROJECT_NAME}) -# endif() - -## Add folders to be run by python nosetests -# catkin_add_nosetests(test) diff --git a/common/speech/lasr_speech/README.md b/common/speech/lasr_speech/README.md deleted file mode 100644 index 6b14f0f6e..000000000 --- a/common/speech/lasr_speech/README.md +++ /dev/null @@ -1,75 +0,0 @@ -# lasr_speech - -The lasr_speech package - -This package is maintained by: -- [Jared](mailto:j.w.swift@outlook.com) - -## Prerequisites - -This package depends on the following ROS packages: -- catkin (buildtool) -- lasr_rasa (build) -- lasr_speech_recognition_msgs (build) -- lasr_speech_recognition_whisper (build) -- rospy (build) -- lasr_rasa (exec) -- lasr_speech_recognition_msgs (exec) -- lasr_speech_recognition_whisper (exec) -- rospy (exec) - -Ask the package maintainer to write or create a blank `doc/PREREQUISITES.md` for their package! - -## Usage - -Ask the package maintainer to write a `doc/USAGE.md` for their package! - -## Example - -Ask the package maintainer to write a `doc/EXAMPLE.md` for their package! - -## Technical Overview - -Ask the package maintainer to write a `doc/TECHNICAL.md` for their package! - -## ROS Definitions - -### Launch Files - -#### `speech` - -No description provided. - -| Argument | Default | Description | -|:-:|:-:|---| -| matcher | by-index | | -| device_param | | | -| rasa_model | | | - - - -### Messages - -This package has no messages. - -### Services - -#### `Speech` - -Request - -| Field | Type | Description | -|:-:|:-:|---| -| play_sound | bool | | - -Response - -| Field | Type | Description | -|:-:|:-:|---| -| json_response | string | | -| success | bool | | - - -### Actions - -This package has no actions. 
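The `Speech` service documented in the README above (and removed by this change) wrapped speech transcription and Rasa parsing behind a single ROS service. A minimal client sketch follows, assuming the package is still built in the workspace; the service name `/lasr_speech/transcribe_and_parse` is taken from the node deleted further down in this diff.

```python
#!/usr/bin/env python3
# Minimal client sketch for the removed lasr_speech service (reference only).
import rospy
from lasr_speech.srv import Speech

rospy.init_node("speech_client_example")
rospy.wait_for_service("/lasr_speech/transcribe_and_parse")
transcribe_and_parse = rospy.ServiceProxy("/lasr_speech/transcribe_and_parse", Speech)

# The request carries a single bool; the beep playback it gated was already commented out.
response = transcribe_and_parse(play_sound=False)
if response.success:
    rospy.loginfo(f"Rasa parse: {response.json_response}")
```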
diff --git a/common/speech/lasr_speech/launch/speech.launch b/common/speech/lasr_speech/launch/speech.launch deleted file mode 100644 index be8729f55..000000000 --- a/common/speech/lasr_speech/launch/speech.launch +++ /dev/null @@ -1,11 +0,0 @@ - - - - - - - - - - - diff --git a/common/speech/lasr_speech/nodes/service b/common/speech/lasr_speech/nodes/service deleted file mode 100755 index 1c308bd8b..000000000 --- a/common/speech/lasr_speech/nodes/service +++ /dev/null @@ -1,61 +0,0 @@ -#!/usr/bin/env python3 -import rospy -from lasr_speech.srv import Speech, SpeechResponse -from lasr_rasa.srv import Rasa -from lasr_speech_recognition_msgs.srv import TranscribeAudio -#import sounddevice -#import soundfile -#import pyaudio -from time import sleep -from multiprocessing import Process -import rospkg -import os -import actionlib - -from lasr_speech_recognition_msgs.msg import ( # type: ignore - TranscribeSpeechAction, - TranscribeSpeechGoal, -) - - -class TranscribeAndParse: - - def __init__(self): - rospy.wait_for_service("/lasr_rasa/parse") - self.rasa = rospy.ServiceProxy("/lasr_rasa/parse", Rasa) - # self.transcribe_audio = rospy.ServiceProxy("/whisper/transcribe_audio", TranscribeAudio) - self.speech_client = actionlib.SimpleActionClient("transcribe_speech", TranscribeSpeechAction) - self.speech_client.wait_for_server() - - # self.sound_data = soundfile.read(os.path.join(rospkg.RosPack().get_path("lasr_speech"), "sounds", "beep.wav"))[0] - - # def play_sound(self): - # def play(): - # audio_interface = pyaudio.PyAudio() - # sample_rate = int(audio_interface.get_default_output_device_info()["defaultSampleRate"]) - # sounddevice.play(self.sound_data, sample_rate, - # device=audio_interface.get_default_output_device_info()["index"]) - # sounddevice.wait() - # audio_interface.terminate() - # p = Process(target=play) - # p.start() - #sleep(0.5) - - def __call__(self, req): - # if req.play_sound: - # self.play_sound() - goal = TranscribeSpeechGoal() - self.speech_client.send_goal(goal) - self.speech_client.wait_for_result() - result = self.speech_client.get_result() - text = result.sequence - rospy.loginfo(text) - rasa_response = self.rasa(text) - rospy.loginfo(rasa_response) - return SpeechResponse(rasa_response.json_response, rasa_response.success) - -if __name__ == "__main__": - rospy.init_node("lasr_speech") - worker = TranscribeAndParse() - rospy.Service("/lasr_speech/transcribe_and_parse", Speech, worker) - rospy.spin() diff --git a/common/speech/lasr_speech/package.xml b/common/speech/lasr_speech/package.xml deleted file mode 100644 index 7af6f40c3..000000000 --- a/common/speech/lasr_speech/package.xml +++ /dev/null @@ -1,71 +0,0 @@ - - - lasr_speech - 0.0.0 - The lasr_speech package - - - - - Jared - - - - - - TODO - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - catkin - lasr_rasa - lasr_speech_recognition_msgs - lasr_speech_recognition_whisper - rospy - lasr_rasa - lasr_speech_recognition_msgs - lasr_speech_recognition_whisper - rospy - lasr_rasa - lasr_speech_recognition_msgs - lasr_speech_recognition_whisper - rospy - - - - - - - - diff --git a/common/speech/lasr_speech/sounds/beep.wav b/common/speech/lasr_speech/sounds/beep.wav deleted file mode 100644 index f224049f9..000000000 Binary files a/common/speech/lasr_speech/sounds/beep.wav and /dev/null differ diff --git a/common/speech/lasr_speech/srv/Speech.srv b/common/speech/lasr_speech/srv/Speech.srv deleted file mode 100644 index bd5f7760b..000000000 --- a/common/speech/lasr_speech/srv/Speech.srv +++ /dev/null 
@@ -1,4 +0,0 @@ -bool play_sound ---- -string json_response -bool success \ No newline at end of file diff --git a/common/speech/lasr_speech_recognition_whisper/nodes/transcribe_microphone_server b/common/speech/lasr_speech_recognition_whisper/nodes/transcribe_microphone_server index b4d164699..3ab9d2983 100644 --- a/common/speech/lasr_speech_recognition_whisper/nodes/transcribe_microphone_server +++ b/common/speech/lasr_speech_recognition_whisper/nodes/transcribe_microphone_server @@ -1,6 +1,6 @@ #!/usr/bin/env python3 - import os +import sounddevice # needed to remove ALSA error messages import argparse from typing import Optional from dataclasses import dataclass @@ -13,31 +13,9 @@ import torch import actionlib import speech_recognition as sr # type: ignore import lasr_speech_recognition_msgs.msg # type: ignore +from std_msgs.msg import String # type: ignore from lasr_speech_recognition_whisper import load_model # type: ignore -# Error handler to remove ALSA error messages taken from: -# https://stackoverflow.com/questions/7088672/pyaudio-working-but-spits-out-error-messages-each-time/17673011#17673011 - -from ctypes import * -from contextlib import contextmanager - -ERROR_HANDLER_FUNC = CFUNCTYPE(None, c_char_p, c_int, c_char_p, c_int, c_char_p) - - -def py_error_handler(filename, line, function, err, fmt): - pass - - -c_error_handler = ERROR_HANDLER_FUNC(py_error_handler) - - -@contextmanager -def noalsaerr(): - asound = cdll.LoadLibrary("libasound.so") - asound.snd_lib_error_set_handler(c_error_handler) - yield - asound.snd_lib_error_set_handler(None) - @dataclass class speech_model_params: @@ -83,40 +61,28 @@ class TranscribeSpeechAction(object): self._action_name = action_name self._model_params = model_params + self._transcription_server = rospy.Publisher( + "/live_speech_transcription", String, queue_size=10 + ) - with noalsaerr(): - self._model = load_model( - self._model_params.model_name, - self._model_params.device, - self._model_params.warmup, - ) - # Configure the speech recogniser object and adjust for ambient noise - self.recogniser = self._configure_recogniser() - # Setup the action server and register execution callback - self._action_server = actionlib.SimpleActionServer( - self._action_name, - lasr_speech_recognition_msgs.msg.TranscribeSpeechAction, - execute_cb=self.execute_cb, - auto_start=False, - ) - self._action_server.register_preempt_callback(self.prempt_cb) - # Setup the timer for adjusting the microphone for ambient noise every x seconds - self._timer_duration = self._model_params.timer_duration - self._timer = rospy.Timer( - rospy.Duration(self._timer_duration), self._timer_cb - ) - self._listening = False - - self._action_server.start() + self._model = load_model( + self._model_params.model_name, + self._model_params.device, + self._model_params.warmup, + ) + # Configure the speech recogniser object and adjust for ambient noise + self.recogniser = self._configure_recogniser(ambient_adj=True) + # Setup the action server and register execution callback + self._action_server = actionlib.SimpleActionServer( + self._action_name, + lasr_speech_recognition_msgs.msg.TranscribeSpeechAction, + execute_cb=self.execute_cb, + auto_start=False, + ) + self._action_server.register_preempt_callback(self.prempt_cb) + self._listening = False - def _timer_cb(self, _) -> None: - """Adjusts the microphone for ambient noise, unless the action server is listening.""" - if self._listening: - return - rospy.loginfo("Adjusting microphone for ambient noise...") - with noalsaerr(): - 
with self._configure_microphone() as source: - self.recogniser.adjust_for_ambient_noise(source) + self._action_server.start() def _reset_timer(self) -> None: """Resets the timer for adjusting the microphone for ambient noise.""" @@ -190,17 +156,13 @@ class TranscribeSpeechAction(object): rospy.loginfo("Request Received") if self._action_server.is_preempt_requested(): return - # Since we are about to listen, reset the timer for adjusting the microphone for ambient noise - # as this assumes self_timer_duration seconds of silence before adjusting - self._reset_timer() - with noalsaerr(): - with self._configure_microphone() as src: - self._listening = True - wav_data = self.recogniser.listen( - src, - timeout=self._model_params.start_timeout, - phrase_time_limit=self._model_params.end_timeout, - ).get_wav_data() + with self._configure_microphone() as src: + self._listening = True + wav_data = self.recogniser.listen( + src, + timeout=self._model_params.start_timeout, + phrase_time_limit=self._model_params.end_timeout, + ).get_wav_data() # Magic number 32768.0 is the maximum value of a 16-bit signed integer float_data = ( np.frombuffer(wav_data, dtype=np.int16).astype(np.float32, order="C") @@ -223,7 +185,7 @@ class TranscribeSpeechAction(object): rospy.loginfo( f"Time taken: {transcription_end_time - transcription_start_time:.2f}s" ) - + self._transcription_server.publish(phrase) if self._action_server.is_preempt_requested(): self._listening = False return @@ -248,13 +210,13 @@ def parse_args() -> dict: ) parser.add_argument( - "--action-name", + "--action_name", type=str, default="transcribe_speech", help="Name of the action server.", ) parser.add_argument( - "--model-name", + "--model_name", type=str, default="medium.en", help="Name of the speech recognition model.", @@ -289,12 +251,6 @@ def parse_args() -> dict: default=None, help="Microphone device index or name", ) - parser.add_argument( - "--timer_duration", - type=int, - default=20, - help="Number of seconds of silence before the ambient noise adjustment is called.", - ) parser.add_argument( "--no_warmup", action="store_true", @@ -328,8 +284,6 @@ def configure_model_params(config: dict) -> speech_model_params: model_params.sample_rate = config["sample_rate"] if config["mic_device"]: model_params.mic_device = config["mic_device"] - if config["timer_duration"]: - model_params.timer_duration = config["timer_duration"] if config["no_warmup"]: model_params.warmup = False diff --git a/common/speech/lasr_speech_recognition_whisper/requirements.in b/common/speech/lasr_speech_recognition_whisper/requirements.in index 8209d34fc..da48c5086 100644 --- a/common/speech/lasr_speech_recognition_whisper/requirements.in +++ b/common/speech/lasr_speech_recognition_whisper/requirements.in @@ -1,5 +1,6 @@ SpeechRecognition==3.10.0 -openai-whisper==20230314 +sounddevice==0.4.6 +openai-whisper==20231117 PyAudio==0.2.13 PyYaml==6.0.1 rospkg==1.5.0 diff --git a/common/speech/lasr_speech_recognition_whisper/requirements.txt b/common/speech/lasr_speech_recognition_whisper/requirements.txt index a6d9bdebe..1cace21e8 100644 --- a/common/speech/lasr_speech_recognition_whisper/requirements.txt +++ b/common/speech/lasr_speech_recognition_whisper/requirements.txt @@ -1,51 +1,54 @@ -catkin-pkg==0.5.2 # via rospkg -certifi==2023.7.22 # via requests -charset-normalizer==3.2.0 # via requests -cmake==3.27.2 # via triton -distro==1.8.0 # via rospkg +--extra-index-url https://pypi.ngc.nvidia.com +--trusted-host pypi.ngc.nvidia.com + +catkin-pkg==1.0.0 # via rospkg 
+certifi==2024.2.2 # via requests +cffi==1.16.0 # via sounddevice +charset-normalizer==3.3.2 # via requests +distro==1.9.0 # via rospkg docutils==0.20.1 # via catkin-pkg -ffmpeg-python==0.2.0 # via openai-whisper -filelock==3.12.2 # via torch, triton -future==0.18.3 # via ffmpeg-python -idna==3.4 # via requests -jinja2==3.1.2 # via torch -lit==16.0.6 # via triton -llvmlite==0.40.1 # via numba -markupsafe==2.1.3 # via jinja2 -more-itertools==10.1.0 # via openai-whisper +filelock==3.13.1 # via torch, triton +fsspec==2024.2.0 # via torch +idna==3.6 # via requests +jinja2==3.1.3 # via torch +llvmlite==0.42.0 # via numba +markupsafe==2.1.5 # via jinja2 +more-itertools==10.2.0 # via openai-whisper mpmath==1.3.0 # via sympy -networkx==3.1 # via torch -numba==0.57.1 # via openai-whisper -numpy==1.24.4 # via numba, openai-whisper -nvidia-cublas-cu11==11.10.3.66 # via nvidia-cudnn-cu11, nvidia-cusolver-cu11, torch -nvidia-cuda-cupti-cu11==11.7.101 # via torch -nvidia-cuda-nvrtc-cu11==11.7.99 # via torch -nvidia-cuda-runtime-cu11==11.7.99 # via torch -nvidia-cudnn-cu11==8.5.0.96 # via torch -nvidia-cufft-cu11==10.9.0.58 # via torch -nvidia-curand-cu11==10.2.10.91 # via torch -nvidia-cusolver-cu11==11.4.0.1 # via torch -nvidia-cusparse-cu11==11.7.4.91 # via torch -nvidia-nccl-cu11==2.14.3 # via torch -nvidia-nvtx-cu11==11.7.91 # via torch -openai-whisper==20230314 # via -r requirements.in +networkx==3.2.1 # via torch +numba==0.59.0 # via openai-whisper +numpy==1.26.4 # via numba, openai-whisper +nvidia-cublas-cu12==12.1.3.1 # via nvidia-cudnn-cu12, nvidia-cusolver-cu12, torch +nvidia-cuda-cupti-cu12==12.1.105 # via torch +nvidia-cuda-nvrtc-cu12==12.1.105 # via torch +nvidia-cuda-runtime-cu12==12.1.105 # via torch +nvidia-cudnn-cu12==8.9.2.26 # via torch +nvidia-cufft-cu12==11.0.2.54 # via torch +nvidia-curand-cu12==10.3.2.106 # via torch +nvidia-cusolver-cu12==11.4.5.107 # via torch +nvidia-cusparse-cu12==12.1.0.106 # via nvidia-cusolver-cu12, torch +nvidia-nccl-cu12==2.19.3 # via torch +nvidia-nvjitlink-cu12==12.4.99 # via nvidia-cusolver-cu12, nvidia-cusparse-cu12 +nvidia-nvtx-cu12==12.1.105 # via torch +openai-whisper==20231117 # via -r requirements.in pyaudio==0.2.13 # via -r requirements.in -pyparsing==3.1.1 # via catkin-pkg -python-dateutil==2.8.2 # via catkin-pkg +pycparser==2.21 # via cffi +pyparsing==3.1.2 # via catkin-pkg +python-dateutil==2.9.0.post0 # via catkin-pkg pyyaml==6.0.1 # via -r requirements.in, rospkg -regex==2023.8.8 # via tiktoken +regex==2023.12.25 # via tiktoken requests==2.31.0 # via speechrecognition, tiktoken rospkg==1.5.0 # via -r requirements.in six==1.16.0 # via python-dateutil +sounddevice==0.4.6 # via -r requirements.in speechrecognition==3.10.0 # via -r requirements.in sympy==1.12 # via torch -tiktoken==0.3.1 # via openai-whisper -torch==2.0.1 # via openai-whisper, triton -tqdm==4.66.1 # via openai-whisper -triton==2.0.0 # via openai-whisper, torch -typing-extensions==4.7.1 # via torch -urllib3==2.0.4 # via requests -wheel==0.41.1 # via nvidia-cublas-cu11, nvidia-cuda-cupti-cu11, nvidia-cuda-runtime-cu11, nvidia-curand-cu11, nvidia-cusparse-cu11, nvidia-nvtx-cu11 +tiktoken==0.6.0 # via openai-whisper +torch==2.2.1 # via openai-whisper +tqdm==4.66.2 # via openai-whisper +triton==2.2.0 # via openai-whisper, torch +typing-extensions==4.10.0 # via torch +urllib3==2.2.1 # via requests # The following packages are considered to be unsafe in a requirements file: # setuptools diff --git a/common/speech/lasr_voice/package.xml b/common/speech/lasr_voice/package.xml index 
499c4d263..51e5d8fd5 100644 --- a/common/speech/lasr_voice/package.xml +++ b/common/speech/lasr_voice/package.xml @@ -7,7 +7,7 @@ - elisabeth + Jared Swift diff --git a/common/speech/lasr_voice/src/lasr_voice/voice.py b/common/speech/lasr_voice/src/lasr_voice/voice.py index fd6307607..0bc657c9f 100755 --- a/common/speech/lasr_voice/src/lasr_voice/voice.py +++ b/common/speech/lasr_voice/src/lasr_voice/voice.py @@ -4,18 +4,17 @@ import actionlib from actionlib_msgs.msg import GoalStatus -# TODO: input is real life or simulation, as it will voice will not play otherwise class Voice: def __init__(self): - self._tts_client = actionlib.SimpleActionClient('/tts', TtsAction) - self.can_tts = self._tts_client.wait_for_server(rospy.Duration(10.0)) + self._tts_client = actionlib.SimpleActionClient("/tts", TtsAction) + self._can_tts = self._tts_client.wait_for_server(rospy.Duration(10.0)) def __tts(self, text): - if self.can_tts: + if self._can_tts: goal = TtsGoal() goal.rawtext.text = text - goal.rawtext.lang_id = 'en_GB' + goal.rawtext.lang_id = "en_GB" self._tts_client.send_goal(goal) rospy.loginfo(f"\033[32mTIAGO: {text}\033[0m") @@ -30,19 +29,7 @@ def get_tts_status(self): return self._tts_client.get_state() def is_running(self): - return self._tts_client.get_state() == GoalStatus.PENDING or \ - self._tts_client.get_state() == GoalStatus.ACTIVE - def speak(self, text): - if rospy.get_param('/is_simulation'): - rospy.loginfo(text) - else: - rospy.loginfo(text) - self.sync_tts(text) - -if __name__ == '__main__': - rospy.init_node("voice_node", anonymous=True) - voice = Voice() - voice.sync_tts("hello world nr 1") - voice.async_tts("hello world nr 2") - while voice.is_running(): - print("still talking") + return ( + self._tts_client.get_state() == GoalStatus.PENDING + or self._tts_client.get_state() == GoalStatus.ACTIVE + ) diff --git a/common/third_party/aruco_ros b/common/third_party/aruco_ros deleted file mode 160000 index e8f93a0a8..000000000 --- a/common/third_party/aruco_ros +++ /dev/null @@ -1 +0,0 @@ -Subproject commit e8f93a0a8ffce542ec1952c573e7f1a063ed2a87 diff --git a/common/vector_databases/lasr_vector_databases_faiss/.gitignore b/common/vector_databases/lasr_vector_databases_faiss/.gitignore new file mode 100644 index 000000000..0d45877c1 --- /dev/null +++ b/common/vector_databases/lasr_vector_databases_faiss/.gitignore @@ -0,0 +1,2 @@ +data/** +!data/.gitkeep \ No newline at end of file diff --git a/legacy/meet_and_greet/CMakeLists.txt b/common/vector_databases/lasr_vector_databases_faiss/CMakeLists.txt similarity index 85% rename from legacy/meet_and_greet/CMakeLists.txt rename to common/vector_databases/lasr_vector_databases_faiss/CMakeLists.txt index e07fb36af..48659fa8c 100644 --- a/legacy/meet_and_greet/CMakeLists.txt +++ b/common/vector_databases/lasr_vector_databases_faiss/CMakeLists.txt @@ -1,5 +1,5 @@ cmake_minimum_required(VERSION 3.0.2) -project(meet_and_greet) +project(lasr_vector_databases_faiss) ## Compile as C++11, supported in ROS Kinetic and newer # add_compile_options(-std=c++11) @@ -7,10 +7,9 @@ project(meet_and_greet) ## Find catkin macros and libraries ## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz) ## is used, also find other catkin packages -find_package(catkin REQUIRED COMPONENTS - roscpp - rospy - std_msgs +find_package(catkin REQUIRED catkin_virtualenv COMPONENTS +rospy +lasr_vector_databases_msgs ) ## System dependencies are found with CMake's conventions @@ -21,7 +20,10 @@ find_package(catkin REQUIRED COMPONENTS ## modules and global 
scripts declared therein get installed ## See http://ros.org/doc/api/catkin/html/user_guide/setup_dot_py.html catkin_python_setup() - +catkin_generate_virtualenv( + INPUT_REQUIREMENTS requirements.in + PYTHON_INTERPRETER python3.9 +) ################################################ ## Declare ROS messages, services and actions ## ################################################ @@ -56,18 +58,15 @@ catkin_python_setup() ## Generate services in the 'srv' folder # add_service_files( # FILES -# Service1.srv -# Service2.srv # ) -## Generate actions in the 'action' folder +# Generate actions in the 'action' folder # add_action_files( -# FILES -# Action1.action -# Action2.action +# DIRECTORY action +# FILES WaitGreet.action Identify.action Greet.action GetName.action LearnFace.action GetCommand.action Guide.action DetectPeople.action FindPerson.action ReceiveObject.action HandoverObject.action # ) -## Generate added messages and services with any dependencies listed here +# Generate added messages and services with any dependencies listed here # generate_messages( # DEPENDENCIES # std_msgs @@ -104,8 +103,7 @@ catkin_python_setup() ## DEPENDS: system dependencies of this project that dependent projects also need catkin_package( # INCLUDE_DIRS include -# LIBRARIES meet_and_greet -# CATKIN_DEPENDS roscpp rospy std_msgs +# LIBRARIES qualification # DEPENDS system_lib ) @@ -122,7 +120,7 @@ include_directories( ## Declare a C++ library # add_library(${PROJECT_NAME} -# src/${PROJECT_NAME}/meet_and_greet.cpp +# src/${PROJECT_NAME}/qualification.cpp # ) ## Add cmake target dependencies of the library @@ -133,7 +131,7 @@ include_directories( ## Declare a C++ executable ## With catkin_make all packages are built within a single CMake context ## The recommended prefix ensures that target names across packages don't collide -# add_executable(${PROJECT_NAME}_node src/meet_and_greet_node.cpp) +# add_executable(${PROJECT_NAME}_node src/qualification_node.cpp) ## Rename C++ executable without prefix ## The above recommended prefix causes long target names, the following renames the @@ -159,10 +157,13 @@ include_directories( ## Mark executable scripts (Python etc.) for installation ## in contrast to setup.py, you can choose the destination -# catkin_install_python(PROGRAMS -# scripts/my_python_script -# DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} -# ) +catkin_install_python(PROGRAMS + nodes/txt_index_service + nodes/txt_query_service + scripts/test_index_service.py + scripts/test_query_service.py + DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} +) ## Mark executables for installation ## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_executables.html @@ -186,18 +187,17 @@ include_directories( # ) ## Mark other files for installation (e.g. launch and bag files, etc.) 
-# install(FILES -# # myfile1 -# # myfile2 -# DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION} -# ) +install(FILES + requirements.txt + DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION} +) ############# ## Testing ## ############# ## Add gtest based cpp test target and link libraries -# catkin_add_gtest(${PROJECT_NAME}-test test/test_meet_and_greet.cpp) +# catkin_add_gtest(${PROJECT_NAME}-test test/test_qualification.cpp) # if(TARGET ${PROJECT_NAME}-test) # target_link_libraries(${PROJECT_NAME}-test ${PROJECT_NAME}) # endif() diff --git a/legacy/face_detection/__init__.py b/common/vector_databases/lasr_vector_databases_faiss/data/.gitkeep similarity index 100% rename from legacy/face_detection/__init__.py rename to common/vector_databases/lasr_vector_databases_faiss/data/.gitkeep diff --git a/legacy/face_detection/src/__init__.py b/common/vector_databases/lasr_vector_databases_faiss/doc/TECHNICAL.md similarity index 100% rename from legacy/face_detection/src/__init__.py rename to common/vector_databases/lasr_vector_databases_faiss/doc/TECHNICAL.md diff --git a/common/vector_databases/lasr_vector_databases_faiss/doc/USAGE.md b/common/vector_databases/lasr_vector_databases_faiss/doc/USAGE.md new file mode 100644 index 000000000..d17914026 --- /dev/null +++ b/common/vector_databases/lasr_vector_databases_faiss/doc/USAGE.md @@ -0,0 +1,33 @@ +This package currently contains two services `txt_index_service` and `txt_query_service`. These services are used to create and search (respectively) a vector database of natural language sentence embeddings. + +# Index Service +The Index service is used to create a [FAISS](https://github.com/facebookresearch/faiss) index object containing a set of sentence embeddings, where each sentence is assumed to be a line in a given `.txt` file. This Index object is saved to disk at a specified location, and can be thought of as a Vector Database. + +## Request +The request takes two string parameters: `txt_path` which is the path to the `.txt` file we wish to create sentence embeddings for, where each line in this file is treated as a sentence; and `index_path` which is the path to a `.index` file that will be created by the Service. + +## Response +No response is given from this service. + +## Example Usage +Please see the `scripts/test_index_service.py` script for a simple example of sending a request to the service. + +# Query Service +The query service is used to search the `.index` file created by the Index Service to find the most similar sentences given an input query sentence. + +## Request +The request requires four fields: + +1. `txt_path` -- this is a `string` that is the path to the txt file that contains the original sentences that the `.index` file was populated with. +2. `index_path` -- this is a `string` that is the path to the `.index` file that was created with the Index Service, on the same txt file as the `txt_path`. +3. `query_sentence` -- this is a `string` that is the sentence you wish to query the index with, in order to find the most similar sentences. +4. `k` -- this is a `uint8` that is the number of closest sentences you wish to return. + +## Response +The response contains two fields: + +1. `closest_sentences` -- this is an ordered list of `string`s that contain the closest sentences to the given query sentence. +2. `cosine_similarities` -- this is an ordered list of `float32`s that contain the cosine similarity scores of the closest sentences.
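As a quick end-to-end illustration, a minimal client for both services might look like the sketch below. This is not part of the package; the file paths are placeholders, both services are assumed to be running, and the service types are imported from `lasr_vector_databases_msgs`, as the service nodes themselves do.

```python
#!/usr/bin/env python3
# Illustrative client sketch -- paths are placeholders, both services must be running.
import rospy
from lasr_vector_databases_msgs.srv import (
    TxtIndex,
    TxtIndexRequest,
    TxtQuery,
    TxtQueryRequest,
)

rospy.init_node("faiss_client_example")
rospy.wait_for_service("lasr_faiss/txt_index")
rospy.wait_for_service("lasr_faiss/txt_query")

# Build the index: one sentence per line in the txt file.
index = rospy.ServiceProxy("lasr_faiss/txt_index", TxtIndex)
index(TxtIndexRequest(txt_path="sentences.txt", index_path="sentences.index"))

# Query the index for the 3 most similar sentences.
query = rospy.ServiceProxy("lasr_faiss/txt_query", TxtQuery)
response = query(
    TxtQueryRequest(
        txt_path="sentences.txt",
        index_path="sentences.index",
        query_sentence="Where is the kitchen?",
        k=3,
    )
)
print(response.closest_sentences)
print(response.cosine_similarities)
```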
+ +## Example Usage +Please see the `scripts/test_query_service.py` script for a simple example of sending a request to the service. \ No newline at end of file diff --git a/common/vector_databases/lasr_vector_databases_faiss/nodes/txt_index_service b/common/vector_databases/lasr_vector_databases_faiss/nodes/txt_index_service new file mode 100644 index 000000000..a9c5c85b6 --- /dev/null +++ b/common/vector_databases/lasr_vector_databases_faiss/nodes/txt_index_service @@ -0,0 +1,33 @@ +#!/usr/bin/env python3 +import rospy +import numpy as np +from lasr_vector_databases_msgs.srv import TxtIndexRequest, TxtIndexResponse, TxtIndex +from lasr_vector_databases_faiss import ( + load_model, + parse_txt_file, + get_sentence_embeddings, + create_vector_database, +) + + +class TxtIndexService: + def __init__(self): + rospy.init_node("txt_index_service") + rospy.Service("lasr_faiss/txt_index", TxtIndex, self.execute_cb) + self._sentence_embedding_model = load_model() + rospy.loginfo("Text index service started") + + def execute_cb(self, req: TxtIndexRequest): + txt_fp: str = req.txt_path + sentences_to_embed: list[str] = parse_txt_file(txt_fp) + sentence_embeddings: np.ndarray = get_sentence_embeddings( + sentences_to_embed, self._sentence_embedding_model + ) + index_path: str = req.index_path + create_vector_database(sentence_embeddings, index_path) + return TxtIndexResponse() + + +if __name__ == "__main__": + TxtIndexService() + rospy.spin() diff --git a/common/vector_databases/lasr_vector_databases_faiss/nodes/txt_query_service b/common/vector_databases/lasr_vector_databases_faiss/nodes/txt_query_service new file mode 100644 index 000000000..e45610fd3 --- /dev/null +++ b/common/vector_databases/lasr_vector_databases_faiss/nodes/txt_query_service @@ -0,0 +1,45 @@ +#!/usr/bin/env python3 +import rospy +import numpy as np + +from lasr_vector_databases_msgs.srv import ( + TxtQueryRequest, + TxtQueryResponse, + TxtQuery, +) +from lasr_vector_databases_faiss import ( + load_model, + parse_txt_file, + get_sentence_embeddings, + load_vector_database, + query_database, +) + + +class TxtQueryService: + def __init__(self): + rospy.init_node("txt_query_service") + self._sentence_embedding_model = load_model() + rospy.Service("lasr_faiss/txt_query", TxtQuery, self.execute_cb) + rospy.loginfo("Text Query service started") + + def execute_cb(self, req: TxtQueryRequest) -> TxtQueryResponse: + txt_fp: str = req.txt_path + index_path: str = req.index_path + query_sentence: str = req.query_sentence + possible_matches: list[str] = parse_txt_file(txt_fp) + query_embedding: np.ndarray = get_sentence_embeddings( + [query_sentence], self._sentence_embedding_model # requires list of strings + ) + distances, indices = query_database(index_path, query_embedding, k=req.k) + nearest_matches = [possible_matches[i] for i in indices[0]] + + return TxtQueryResponse( + closest_sentences=nearest_matches, + cosine_similarities=distances[0].tolist(), + ) + + +if __name__ == "__main__": + TxtQueryService() + rospy.spin() diff --git a/legacy/cv_bridge3/package.xml b/common/vector_databases/lasr_vector_databases_faiss/package.xml similarity index 88% rename from legacy/cv_bridge3/package.xml rename to common/vector_databases/lasr_vector_databases_faiss/package.xml index 7ae194569..1546f0a38 100644 --- a/legacy/cv_bridge3/package.xml +++ b/common/vector_databases/lasr_vector_databases_faiss/package.xml @@ -1,25 +1,25 @@ - cv_bridge3 + lasr_vector_databases_faiss 0.0.0 - The cv_bridge3 package + The faiss package - jared + Matt Barker - 
TODO + MIT - + @@ -50,8 +50,7 @@ catkin catkin_virtualenv - - + lasr_vector_databases_msgs diff --git a/common/vector_databases/lasr_vector_databases_faiss/requirements.in b/common/vector_databases/lasr_vector_databases_faiss/requirements.in new file mode 100644 index 000000000..3259a62c9 --- /dev/null +++ b/common/vector_databases/lasr_vector_databases_faiss/requirements.in @@ -0,0 +1,3 @@ +faiss-cpu +sentence-transformers +torch \ No newline at end of file diff --git a/common/vector_databases/lasr_vector_databases_faiss/requirements.txt b/common/vector_databases/lasr_vector_databases_faiss/requirements.txt new file mode 100644 index 000000000..4557b8f0c --- /dev/null +++ b/common/vector_databases/lasr_vector_databases_faiss/requirements.txt @@ -0,0 +1,53 @@ +--extra-index-url https://pypi.ngc.nvidia.com +--trusted-host pypi.ngc.nvidia.com + +certifi==2024.2.2 # via requests +charset-normalizer==3.3.2 # via requests +click==8.1.7 # via nltk +faiss-cpu==1.7.4 # via -r requirements.in +filelock==3.13.1 # via huggingface-hub, torch, transformers, triton +fsspec==2024.2.0 # via huggingface-hub, torch +ftfy==6.1.3 # via -r requirements.in, clip +huggingface-hub==0.20.3 # via sentence-transformers, tokenizers, transformers +idna==3.6 # via requests +jinja2==3.1.3 # via torch +joblib==1.3.2 # via nltk, scikit-learn +markupsafe==2.1.5 # via jinja2 +mpmath==1.3.0 # via sympy +networkx==3.2.1 # via torch +nltk==3.8.1 # via sentence-transformers +numpy==1.26.3 # via opencv-python, scikit-learn, scipy, sentence-transformers, torchvision, transformers +nvidia-cublas-cu12==12.1.3.1 # via nvidia-cudnn-cu12, nvidia-cusolver-cu12, torch +nvidia-cuda-cupti-cu12==12.1.105 # via torch +nvidia-cuda-nvrtc-cu12==12.1.105 # via torch +nvidia-cuda-runtime-cu12==12.1.105 # via torch +nvidia-cudnn-cu12==8.9.2.26 # via torch +nvidia-cufft-cu12==11.0.2.54 # via torch +nvidia-curand-cu12==10.3.2.106 # via torch +nvidia-cusolver-cu12==11.4.5.107 # via torch +nvidia-cusparse-cu12==12.1.0.106 # via nvidia-cusolver-cu12, torch +nvidia-nccl-cu12==2.19.3 # via torch +nvidia-nvjitlink-cu12==12.3.101 # via nvidia-cusolver-cu12, nvidia-cusparse-cu12 +nvidia-nvtx-cu12==12.1.105 # via torch +opencv-python==4.9.0.80 # via -r requirements.in +packaging==23.2 # via huggingface-hub, transformers +pillow==10.2.0 # via sentence-transformers, torchvision +pyyaml==6.0.1 # via huggingface-hub, transformers +regex==2023.12.25 # via -r requirements.in, clip, nltk, transformers +requests==2.31.0 # via huggingface-hub, torchvision, transformers +safetensors==0.4.2 # via transformers +scikit-learn==1.4.0 # via sentence-transformers +scipy==1.12.0 # via scikit-learn, sentence-transformers +sentence-transformers==2.3.1 # via -r requirements.in +sentencepiece==0.1.99 # via sentence-transformers +sympy==1.12 # via torch +threadpoolctl==3.2.0 # via scikit-learn +tokenizers==0.15.1 # via transformers +torch==2.2.0 # via clip, sentence-transformers, torchvision +torchvision==0.17.0 # via clip +tqdm==4.66.1 # via -r requirements.in, clip, huggingface-hub, nltk, sentence-transformers, transformers +transformers==4.37.2 # via sentence-transformers +triton==2.2.0 # via torch +typing-extensions==4.9.0 # via huggingface-hub, torch +urllib3==2.2.0 # via requests +wcwidth==0.2.13 # via ftfy diff --git a/common/vector_databases/lasr_vector_databases_faiss/scripts/test_index_service.py b/common/vector_databases/lasr_vector_databases_faiss/scripts/test_index_service.py new file mode 100644 index 000000000..cc7f12f3c --- /dev/null +++ 
b/common/vector_databases/lasr_vector_databases_faiss/scripts/test_index_service.py @@ -0,0 +1,14 @@ +#!/usr/bin/env python3 +import rospy +from lasr_vector_databases_faiss.srv import TxtIndex, TxtIndexRequest + +request = TxtIndexRequest() + +request.txt_path = ( + "/home/mattbarker/LASR/lasr_ws/src/lasr-base/tasks/gpsr/data/questions.txt" +) + +request.index_path = ( + "/home/mattbarker/LASR/lasr_ws/src/lasr-base/tasks/gpsr/data/questions.index" +) +rospy.ServiceProxy("lasr_faiss/txt_index", TxtIndex)(request) diff --git a/common/vector_databases/lasr_vector_databases_faiss/scripts/test_query_service.py b/common/vector_databases/lasr_vector_databases_faiss/scripts/test_query_service.py new file mode 100644 index 000000000..4ae89e530 --- /dev/null +++ b/common/vector_databases/lasr_vector_databases_faiss/scripts/test_query_service.py @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 +import rospy +from lasr_vector_databases_faiss.srv import TxtQuery, TxtQueryRequest + +request = TxtQueryRequest() + +request.txt_path = ( + "/home/mattbarker/LASR/lasr_ws/src/lasr-base/tasks/gpsr/data/questions.txt" +) + +request.index_path = ( + "/home/mattbarker/LASR/lasr_ws/src/lasr-base/tasks/gpsr/data/questions.index" +) + +request.query_sentence = "Do French like snails?" + +request.k = 3 + +response = rospy.ServiceProxy("lasr_faiss/txt_query", TxtQuery)(request) + +print(response.closest_sentences) +print(response.cosine_similarities) diff --git a/common/helpers/tf_module/setup.py b/common/vector_databases/lasr_vector_databases_faiss/setup.py similarity index 68% rename from common/helpers/tf_module/setup.py rename to common/vector_databases/lasr_vector_databases_faiss/setup.py index dcfac4bf8..443aec4a4 100644 --- a/common/helpers/tf_module/setup.py +++ b/common/vector_databases/lasr_vector_databases_faiss/setup.py @@ -3,10 +3,8 @@ from distutils.core import setup from catkin_pkg.python_setup import generate_distutils_setup -# fetch values from package.xml setup_args = generate_distutils_setup( - packages=['tf_module'], - package_dir={'': 'src'} + packages=["lasr_vector_databases_faiss"], package_dir={"": "src"} ) setup(**setup_args) diff --git a/common/vector_databases/lasr_vector_databases_faiss/src/lasr_vector_databases_faiss/__init__.py b/common/vector_databases/lasr_vector_databases_faiss/src/lasr_vector_databases_faiss/__init__.py new file mode 100644 index 000000000..698927d9c --- /dev/null +++ b/common/vector_databases/lasr_vector_databases_faiss/src/lasr_vector_databases_faiss/__init__.py @@ -0,0 +1,2 @@ +from .database_utils import create_vector_database, load_vector_database, query_database +from .get_sentence_embeddings import get_sentence_embeddings, load_model, parse_txt_file diff --git a/common/vector_databases/lasr_vector_databases_faiss/src/lasr_vector_databases_faiss/database_utils.py b/common/vector_databases/lasr_vector_databases_faiss/src/lasr_vector_databases_faiss/database_utils.py new file mode 100644 index 000000000..6d0139072 --- /dev/null +++ b/common/vector_databases/lasr_vector_databases_faiss/src/lasr_vector_databases_faiss/database_utils.py @@ -0,0 +1,85 @@ +#!/usr/bin/env python3 +import os +import numpy as np +import faiss + + +def create_vector_database( + vectors: np.ndarray, + index_path: str, + overwrite: bool = False, + index_type: str = "Flat", + normalise_vecs: bool = True, +) -> None: + """Creates a FAISS Index using the factory constructor and the given + index type, and adds the given vector to the index, and then saves + it to disk using the given path. 
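Example (an illustrative sketch only; the embedding dimensionality and output path are placeholders, not values used elsewhere in this package):

        >>> import numpy as np
        >>> vecs = np.random.rand(8, 384).astype("float32")
        >>> create_vector_database(vecs, "/tmp/example.index", overwrite=True)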
+ + Args: + vectors (np.ndarray): vector of shape (n_vectors, vector_dim) + index_path (str): path to save the index + overwrite (bool, optional): Whether to replace an existing index + at the same filepath if it exists. Defaults to False. + index_type (str, optional): FAISS Index Factory string. Defaults to "IndexFlatIP". + normalise_vecs (bool, optional): Whether to normalise the vectors before + adding them to the Index. This converts the IP metric to Cosine Similarity. + Defaults to True. + """ + + if os.path.exists(index_path) and not overwrite: + raise FileExistsError( + f"Index already exists at {index_path}. Set overwrite=True to replace it." + ) + + index = faiss.index_factory( + vectors.shape[1], index_type, faiss.METRIC_INNER_PRODUCT + ) + if normalise_vecs: + faiss.normalize_L2(vectors) + index.add(vectors) + faiss.write_index(index, index_path) + + +def load_vector_database(index_path: str, use_gpu: bool = False) -> faiss.Index: + """Loads a FAISS Index from the given filepath + + Args: + index_path (str): path to the index file + use_gpu (bool, optional): Whether to load the index onto the GPU. + Defaults to False. + + Returns: + faiss.Index: FAISS Index object + """ + print("Loading index from", index_path) + index = faiss.read_index(index_path) + print("Loaded index with ntotal:", index.ntotal) + if use_gpu: + index = faiss.index_cpu_to_all_gpus(index) + return index + + +def query_database( + index_path: str, + query_vectors: np.ndarray, + normalise: bool = True, + k: int = 1, +) -> tuple[np.ndarray, np.ndarray]: + """Queries the given index with the given query vectors + + Args: + index_path (str): path to the index file + query_vectors (np.ndarray): query vectors of shape (n_queries, vector_dim) + normalise (bool, optional): Whether to normalise the query vectors. + Defaults to True. + k (int, optional): Number of nearest neighbours to return. Defaults to 1. + + Returns: + tuple[np.ndarray, np.ndarray]: (distances, indices) of the nearest neighbours + each of shape (n_queries, n_neighbours) + """ + index = load_vector_database(index_path) + if normalise: + faiss.normalize_L2(query_vectors) + distances, indices = index.search(query_vectors, k) + return distances, indices diff --git a/common/vector_databases/lasr_vector_databases_faiss/src/lasr_vector_databases_faiss/get_sentence_embeddings.py b/common/vector_databases/lasr_vector_databases_faiss/src/lasr_vector_databases_faiss/get_sentence_embeddings.py new file mode 100644 index 000000000..1ba43b48c --- /dev/null +++ b/common/vector_databases/lasr_vector_databases_faiss/src/lasr_vector_databases_faiss/get_sentence_embeddings.py @@ -0,0 +1,56 @@ +#!/usr/bin/env python3 +import torch +import numpy as np +from sentence_transformers import SentenceTransformer + +DEVICE = "cuda" if torch.cuda.is_available() else "cpu" + + +def load_model(model_name: str = "all-MiniLM-L6-v2") -> SentenceTransformer: + """Loads the sentence transformer model + Args: + model_name (str): name of the model to load + Returns: + sentence_transformers.SentenceTransformer: the loaded model + """ + return SentenceTransformer(model_name, device=DEVICE) + + +def parse_txt_file(fp: str) -> list[str]: + """Parses a txt file into a list of strings, + where each element is a line in the txt file with the + newline char stripped. 
+ Args: + fp (str): path to the txt file to load + Returns: + list[str]: list of strings where each element is a line in the txt file + """ + sentences = [] + with open(fp, "r", encoding="utf8") as src: + for line in src: + # Strip newline char. + sentences.append(line[:-1]) + return sentences + + +def get_sentence_embeddings( + sentence_list: list[str], model: SentenceTransformer +) -> np.ndarray: + """Converts the list of string sentences into an array of sentence + embeddings + Args: + sentence_list (list[str]): list of string sentences, where each + entry in the list is assumed to be a separate sentence + model (SentenceTransformer): model used to perform the embedding. + Assumes a method called encode that takes a list of strings + as input. + Returns: + np.ndarray: array of shape (n_commands, embedding_dim) + """ + return model.encode( + sentence_list, + convert_to_numpy=True, + show_progress_bar=True, + batch_size=256, + device=DEVICE, + ) diff --git a/legacy/lasr_dialogflow/CMakeLists.txt b/common/vector_databases/lasr_vector_databases_msgs/CMakeLists.txt similarity index 92% rename from legacy/lasr_dialogflow/CMakeLists.txt rename to common/vector_databases/lasr_vector_databases_msgs/CMakeLists.txt index 9eaba0e7e..63fd162dc 100644 --- a/legacy/lasr_dialogflow/CMakeLists.txt +++ b/common/vector_databases/lasr_vector_databases_msgs/CMakeLists.txt @@ -1,5 +1,5 @@ cmake_minimum_required(VERSION 3.0.2) -project(lasr_dialogflow) +project(lasr_vector_databases_msgs) ## Compile as C++11, supported in ROS Kinetic and newer # add_compile_options(-std=c++11) @@ -7,7 +7,7 @@ project(lasr_dialogflow) ## Find catkin macros and libraries ## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz) ## is used, also find other catkin packages -find_package(catkin REQUIRED COMPONENTS message_generation message_runtime rospy std_msgs) +find_package(catkin REQUIRED COMPONENTS message_generation message_runtime) ## System dependencies are found with CMake's conventions # find_package(Boost REQUIRED COMPONENTS system) @@ -16,7 +16,7 @@ find_package(catkin REQUIRED COMPONENTS message_generation message_runtime rospy ## Uncomment this if the package has a setup.py. 
This macro ensures ## modules and global scripts declared therein get installed ## See http://ros.org/doc/api/catkin/html/user_guide/setup_dot_py.html -catkin_python_setup() +# catkin_python_setup() ################################################ ## Declare ROS messages, services and actions ## @@ -45,28 +45,20 @@ catkin_python_setup() ## Generate messages in the 'msg' folder # add_message_files( # FILES -# Message1.msg -# Message2.msg # ) ## Generate services in the 'srv' folder add_service_files( - FILES - DialogflowAudio.srv - DialogflowText.srv + FILES + TxtIndex.srv + TxtQuery.srv ) -## Generate actions in the 'action' folder -# add_action_files( -# FILES -# Action1.action -# Action2.action -# ) +# Generate actions in the 'action' folder ## Generate added messages and services with any dependencies listed here generate_messages( - DEPENDENCIES - std_msgs # Or other packages containing msgs + DEPENDENCIES ) ################################################ @@ -100,7 +92,7 @@ generate_messages( ## DEPENDS: system dependencies of this project that dependent projects also need catkin_package( # INCLUDE_DIRS include -# LIBRARIES lasr_dialogflow +# LIBRARIES lasr_vision_msgs # CATKIN_DEPENDS other_catkin_pkg # DEPENDS system_lib ) @@ -118,7 +110,7 @@ include_directories( ## Declare a C++ library # add_library(${PROJECT_NAME} -# src/${PROJECT_NAME}/lasr_dialogflow.cpp +# src/${PROJECT_NAME}/lasr_vision_msgs.cpp # ) ## Add cmake target dependencies of the library @@ -129,7 +121,7 @@ include_directories( ## Declare a C++ executable ## With catkin_make all packages are built within a single CMake context ## The recommended prefix ensures that target names across packages don't collide -# add_executable(${PROJECT_NAME}_node src/lasr_dialogflow_node.cpp) +# add_executable(${PROJECT_NAME}_node src/lasr_vision_msgs_node.cpp) ## Rename C++ executable without prefix ## The above recommended prefix causes long target names, the following renames the @@ -193,7 +185,7 @@ include_directories( ############# ## Add gtest based cpp test target and link libraries -# catkin_add_gtest(${PROJECT_NAME}-test test/test_lasr_dialogflow.cpp) +# catkin_add_gtest(${PROJECT_NAME}-test test/test_lasr_vision_msgs.cpp) # if(TARGET ${PROJECT_NAME}-test) # target_link_libraries(${PROJECT_NAME}-test ${PROJECT_NAME}) # endif() diff --git a/legacy/lasr_interaction_server/package.xml b/common/vector_databases/lasr_vector_databases_msgs/package.xml similarity index 85% rename from legacy/lasr_interaction_server/package.xml rename to common/vector_databases/lasr_vector_databases_msgs/package.xml index f751e07b0..5f4e45e9f 100644 --- a/legacy/lasr_interaction_server/package.xml +++ b/common/vector_databases/lasr_vector_databases_msgs/package.xml @@ -1,25 +1,25 @@ - lasr_interaction_server + lasr_vector_databases_msgs 0.0.0 - The lasr_interaction_server package + Messages required for vector databases - jared + Matt Barker - TODO + MIT - + @@ -49,6 +49,8 @@ catkin + message_generation + message_runtime diff --git a/common/vector_databases/lasr_vector_databases_msgs/srv/TxtIndex.srv b/common/vector_databases/lasr_vector_databases_msgs/srv/TxtIndex.srv new file mode 100644 index 000000000..79ac01654 --- /dev/null +++ b/common/vector_databases/lasr_vector_databases_msgs/srv/TxtIndex.srv @@ -0,0 +1,7 @@ +# Path to input text file +string txt_path + +# Output path to save index +string index_path + +--- diff --git a/common/vector_databases/lasr_vector_databases_msgs/srv/TxtQuery.srv 
b/common/vector_databases/lasr_vector_databases_msgs/srv/TxtQuery.srv new file mode 100644 index 000000000..bbcb04613 --- /dev/null +++ b/common/vector_databases/lasr_vector_databases_msgs/srv/TxtQuery.srv @@ -0,0 +1,19 @@ +# Path to input text file +string txt_path + +# Path to index file to load +string index_path + +# Sentence to query index with +string query_sentence + +# Number of nearest sentences to return +uint8 k + +--- +# Nearest sentence +string[] closest_sentences + +# Cosine similarity of distances +float32[] cosine_similarities + diff --git a/common/vision/lasr_vision_clip/.gitignore b/common/vision/lasr_vision_clip/.gitignore new file mode 100644 index 000000000..0d45877c1 --- /dev/null +++ b/common/vision/lasr_vision_clip/.gitignore @@ -0,0 +1,2 @@ +data/** +!data/.gitkeep \ No newline at end of file diff --git a/legacy/lasr_navigate_to_known_person/CMakeLists.txt b/common/vision/lasr_vision_clip/CMakeLists.txt similarity index 86% rename from legacy/lasr_navigate_to_known_person/CMakeLists.txt rename to common/vision/lasr_vision_clip/CMakeLists.txt index 3cf9eac00..739bfda15 100644 --- a/legacy/lasr_navigate_to_known_person/CMakeLists.txt +++ b/common/vision/lasr_vision_clip/CMakeLists.txt @@ -1,5 +1,5 @@ cmake_minimum_required(VERSION 3.0.2) -project(lasr_navigate_to_known_person) +project(lasr_vision_clip) ## Compile as C++11, supported in ROS Kinetic and newer # add_compile_options(-std=c++11) @@ -7,9 +7,7 @@ project(lasr_navigate_to_known_person) ## Find catkin macros and libraries ## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz) ## is used, also find other catkin packages -find_package(catkin REQUIRED COMPONENTS - rospy -) +find_package(catkin REQUIRED catkin_virtualenv) ## System dependencies are found with CMake's conventions # find_package(Boost REQUIRED COMPONENTS system) @@ -19,7 +17,10 @@ find_package(catkin REQUIRED COMPONENTS ## modules and global scripts declared therein get installed ## See http://ros.org/doc/api/catkin/html/user_guide/setup_dot_py.html catkin_python_setup() - +catkin_generate_virtualenv( + INPUT_REQUIREMENTS requirements.in + PYTHON_INTERPRETER python3.10 +) ################################################ ## Declare ROS messages, services and actions ## ################################################ @@ -47,28 +48,26 @@ catkin_python_setup() ## Generate messages in the 'msg' folder # add_message_files( # FILES -# Message1.msg -# Message2.msg +# VqaResult.msg +# VqaResult.msg # ) -## Generate services in the 'srv' folder +# Generate services in the 'srv' folder # add_service_files( # FILES -# Service1.srv -# Service2.srv +# Vqa.srv # ) -## Generate actions in the 'action' folder +# Generate actions in the 'action' folder # add_action_files( -# FILES -# Action1.action -# Action2.action +# DIRECTORY action +# FILES WaitGreet.action Identify.action Greet.action GetName.action LearnFace.action GetCommand.action Guide.action DetectPeople.action FindPerson.action ReceiveObject.action HandoverObject.action # ) -## Generate added messages and services with any dependencies listed here +# Generate added messages and services with any dependencies listed here # generate_messages( # DEPENDENCIES -# std_msgs # Or other packages containing msgs +# sensor_msgs # ) ################################################ @@ -102,8 +101,7 @@ catkin_python_setup() ## DEPENDS: system dependencies of this project that dependent projects also need catkin_package( # INCLUDE_DIRS include -# LIBRARIES lasr_navigate_to_known_person -# 
CATKIN_DEPENDS rospy +# LIBRARIES qualification # DEPENDS system_lib ) @@ -120,7 +118,7 @@ include_directories( ## Declare a C++ library # add_library(${PROJECT_NAME} -# src/${PROJECT_NAME}/lasr_navigate_to_known_person.cpp +# src/${PROJECT_NAME}/qualification.cpp # ) ## Add cmake target dependencies of the library @@ -131,7 +129,7 @@ include_directories( ## Declare a C++ executable ## With catkin_make all packages are built within a single CMake context ## The recommended prefix ensures that target names across packages don't collide -# add_executable(${PROJECT_NAME}_node src/lasr_navigate_to_known_person_node.cpp) +# add_executable(${PROJECT_NAME}_node src/qualification_node.cpp) ## Rename C++ executable without prefix ## The above recommended prefix causes long target names, the following renames the @@ -157,10 +155,10 @@ include_directories( ## Mark executable scripts (Python etc.) for installation ## in contrast to setup.py, you can choose the destination -# catkin_install_python(PROGRAMS -# scripts/my_python_script -# DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} -# ) +catkin_install_python(PROGRAMS + nodes/vqa + DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} +) ## Mark executables for installation ## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_executables.html @@ -184,18 +182,17 @@ include_directories( # ) ## Mark other files for installation (e.g. launch and bag files, etc.) -# install(FILES -# # myfile1 -# # myfile2 -# DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION} -# ) +install(FILES + requirements.txt + DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION} +) ############# ## Testing ## ############# ## Add gtest based cpp test target and link libraries -# catkin_add_gtest(${PROJECT_NAME}-test test/test_lasr_navigate_to_known_person.cpp) +# catkin_add_gtest(${PROJECT_NAME}-test test/test_qualification.cpp) # if(TARGET ${PROJECT_NAME}-test) # target_link_libraries(${PROJECT_NAME}-test ${PROJECT_NAME}) # endif() diff --git a/legacy/face_detection/src/face_detection/__init__.py b/common/vision/lasr_vision_clip/__init__.py similarity index 100% rename from legacy/face_detection/src/face_detection/__init__.py rename to common/vision/lasr_vision_clip/__init__.py diff --git a/legacy/find_person_and_ask_open_door/src/find_person_and_ask_open_door/__init__.py b/common/vision/lasr_vision_clip/data/.gitkeep similarity index 100% rename from legacy/find_person_and_ask_open_door/src/find_person_and_ask_open_door/__init__.py rename to common/vision/lasr_vision_clip/data/.gitkeep diff --git a/common/vision/lasr_vision_clip/nodes/vqa b/common/vision/lasr_vision_clip/nodes/vqa new file mode 100644 index 000000000..f5bc344a5 --- /dev/null +++ b/common/vision/lasr_vision_clip/nodes/vqa @@ -0,0 +1,50 @@ +#!/usr/bin/env python3 +import rospy +from typing import List +from lasr_vision_clip.clip_utils import load_model, query_image_stream +from lasr_vision_msgs.srv import VqaRequest, VqaResponse, Vqa +from sensor_msgs.msg import Image + + +class VqaService: + def __init__(self, model_device: str = "cuda") -> None: + """Caches the clip model. + + Args: + model_device (str, optional): device to load model onto. Defaults to "cuda". + + """ + + self._model = load_model(model_device) + self._debug_pub = rospy.Publisher("/clip_vqa/debug", Image, queue_size=1) + rospy.loginfo("Clip VQA service started") + + def query_clip(self, request: VqaRequest) -> VqaResponse: + """Queries CLIP from the robot's image stream and returns + the most likely answer and cosine similarity score. 
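An illustrative client-side call, for reference (the service name is the one registered by this node; the candidate answers are placeholders):

        >>> from lasr_vision_msgs.srv import Vqa, VqaRequest
        >>> vqa = rospy.ServiceProxy("/clip_vqa/query_service", Vqa)
        >>> result = vqa(VqaRequest(possible_answers=["a person waving", "an empty room"]))
        >>> print(result.answer, result.similarity)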
+ + Args: + possible_answers (List[str]): set of possible answers. + + Returns: + VqaResult + """ + possible_answers = request.possible_answers + answer, cos_score, annotated_img = query_image_stream( + self._model, possible_answers, annotate=True + ) + + self._debug_pub.publish(annotated_img) + + result = VqaResponse() + result.answer = answer + rospy.loginfo(f"Answer: {answer}") + result.similarity = float(cos_score) + return result + + +if __name__ == "__main__": + rospy.init_node("clip_vqa_service") + service = VqaService() + rospy.Service("/clip_vqa/query_service", Vqa, service.query_clip) + rospy.spin() diff --git a/legacy/face_detection/package.xml b/common/vision/lasr_vision_clip/package.xml similarity index 80% rename from legacy/face_detection/package.xml rename to common/vision/lasr_vision_clip/package.xml index bfb1b723e..165c21c6e 100644 --- a/legacy/face_detection/package.xml +++ b/common/vision/lasr_vision_clip/package.xml @@ -1,25 +1,25 @@ - face_detection + lasr_vision_clip 0.0.0 - The face_detection package + The lasr_vision_clip package - nicole + Matt Barker - TODO + MIT - + @@ -49,18 +49,15 @@ catkin - rospy - std_msgs - rospy - std_msgs - rospy - std_msgs + catkin_virtualenv + message_generation + message_runtime + sensor_msgs + sensor_msgs lasr_vision_msgs - - - + requirements.txt - \ No newline at end of file + diff --git a/common/vision/lasr_vision_clip/requirements.in b/common/vision/lasr_vision_clip/requirements.in new file mode 100644 index 000000000..b03f1e5cc --- /dev/null +++ b/common/vision/lasr_vision_clip/requirements.in @@ -0,0 +1,2 @@ +sentence-transformers +opencv-python \ No newline at end of file diff --git a/common/vision/lasr_vision_clip/requirements.txt b/common/vision/lasr_vision_clip/requirements.txt new file mode 100644 index 000000000..7c61ba101 --- /dev/null +++ b/common/vision/lasr_vision_clip/requirements.txt @@ -0,0 +1,43 @@ +certifi==2024.2.2 # via requests +charset-normalizer==3.3.2 # via requests +filelock==3.13.4 # via huggingface-hub, torch, transformers, triton +fsspec==2024.3.1 # via huggingface-hub, torch +huggingface-hub==0.22.2 # via sentence-transformers, tokenizers, transformers +idna==3.7 # via requests +jinja2==3.1.3 # via torch +joblib==1.4.0 # via scikit-learn +markupsafe==2.1.5 # via jinja2 +mpmath==1.3.0 # via sympy +networkx==3.2.1 # via torch +numpy==1.26.4 # via opencv-python, scikit-learn, scipy, sentence-transformers, transformers +nvidia-cublas-cu12==12.1.3.1 # via nvidia-cudnn-cu12, nvidia-cusolver-cu12, torch +nvidia-cuda-cupti-cu12==12.1.105 # via torch +nvidia-cuda-nvrtc-cu12==12.1.105 # via torch +nvidia-cuda-runtime-cu12==12.1.105 # via torch +nvidia-cudnn-cu12==8.9.2.26 # via torch +nvidia-cufft-cu12==11.0.2.54 # via torch +nvidia-curand-cu12==10.3.2.106 # via torch +nvidia-cusolver-cu12==11.4.5.107 # via torch +nvidia-cusparse-cu12==12.1.0.106 # via nvidia-cusolver-cu12, torch +nvidia-nccl-cu12==2.19.3 # via torch +nvidia-nvjitlink-cu12==12.4.127 # via nvidia-cusolver-cu12, nvidia-cusparse-cu12 +nvidia-nvtx-cu12==12.1.105 # via torch +opencv-python==4.9.0.80 # via -r requirements.in +packaging==24.0 # via huggingface-hub, transformers +pillow==10.3.0 # via sentence-transformers +pyyaml==6.0.1 # via huggingface-hub, transformers +regex==2024.4.16 # via transformers +requests==2.31.0 # via huggingface-hub, transformers +safetensors==0.4.3 # via transformers +scikit-learn==1.4.2 # via sentence-transformers +scipy==1.13.0 # via scikit-learn, sentence-transformers +sentence-transformers==2.7.0 # via -r 
requirements.in +sympy==1.12 # via torch +threadpoolctl==3.4.0 # via scikit-learn +tokenizers==0.15.2 # via transformers +torch==2.2.2 # via sentence-transformers +tqdm==4.66.2 # via huggingface-hub, sentence-transformers, transformers +transformers==4.39.3 # via sentence-transformers +triton==2.2.0 # via torch +typing-extensions==4.11.0 # via huggingface-hub, torch +urllib3==2.2.1 # via requests diff --git a/legacy/people_detection/train_dataset_model/setup.py b/common/vision/lasr_vision_clip/setup.py similarity index 73% rename from legacy/people_detection/train_dataset_model/setup.py rename to common/vision/lasr_vision_clip/setup.py index f91f24c03..d02820e3c 100644 --- a/legacy/people_detection/train_dataset_model/setup.py +++ b/common/vision/lasr_vision_clip/setup.py @@ -4,8 +4,7 @@ from catkin_pkg.python_setup import generate_distutils_setup setup_args = generate_distutils_setup( - packages=['train_dataset_model'], - package_dir={'': 'src'} + packages=["lasr_vision_clip"], package_dir={"": "src"} ) setup(**setup_args) diff --git a/common/vision/lasr_vision_clip/src/lasr_vision_clip/__init__.py b/common/vision/lasr_vision_clip/src/lasr_vision_clip/__init__.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/common/vision/lasr_vision_clip/src/lasr_vision_clip/__init__.py @@ -0,0 +1 @@ + diff --git a/common/vision/lasr_vision_clip/src/lasr_vision_clip/clip_utils.py b/common/vision/lasr_vision_clip/src/lasr_vision_clip/clip_utils.py new file mode 100644 index 000000000..fc92fe7cd --- /dev/null +++ b/common/vision/lasr_vision_clip/src/lasr_vision_clip/clip_utils.py @@ -0,0 +1,100 @@ +#!/usr/bin/env python3 +import torch +import rospy +import cv2 +import cv2_img +import numpy as np +from copy import deepcopy +from sentence_transformers import SentenceTransformer, util + +from sensor_msgs.msg import Image + + +def load_model(device: str = "cuda"): + """Load the CLIP model. + + Args: + model_name (str): the model name + device (str, optional): the device to use. Defaults to "cuda". + + Returns: + Any: the model and preprocess function + """ + model = SentenceTransformer("clip-ViT-B-32", device=device) + return model + + +def run_clip( + model: SentenceTransformer, labels: list[str], img: np.ndarray +) -> torch.Tensor: + """Run the CLIP model. + + Args: + model (Any): clip model loaded into memory + labels (List[str]): list of string labels to query image similarity to. + img (np.ndarray): the image to query + + Returns: + List[float]: the cosine similarity scores between the image and label embeddings. + """ + txt = model.encode(labels) + img = model.encode(img) + with torch.no_grad(): + cos_scores = util.cos_sim(img, txt) + return cos_scores + + +def query_image_stream( + model: SentenceTransformer, + answers: list[str], + annotate: bool = False, +) -> tuple[str, torch.Tensor, Image]: + """Queries the CLIP model with the latest image from the robot's camera + and a set of possible image captions and returns the most likely caption. + + Args: + model (SentenceTransformer): clip model to run inference on, loaded into memory + answers(list[str]): list of possible answers + annotate(bool, optional): whether to annotate the image with the most likely, and + second most likely, caption. Defaults to False. 
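Example (illustrative only; assumes the robot camera topic is publishing and the captions are placeholders):

        >>> model = load_model("cuda")
        >>> answer, scores, annotated_msg = query_image_stream(
        ...     model, ["a red cup", "a blue cup"], annotate=True
        ... )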
+ returns: + tuple(str, torch.Tensor, Image): the most likely answer, the scores, and the annotated image msg + """ + img_msg = rospy.wait_for_message("/xtion/rgb/image_raw", Image) + img_pil = cv2_img.msg_to_pillow_img(img_msg) + + cos_scores = run_clip(model, answers, img_pil) + max_score = cos_scores.argmax() + # get second highest score in tensor + max_val = deepcopy(cos_scores[0, max_score]) + cos_scores[0, max_score] = 0 + second_max_score = cos_scores.argmax() + cos_scores[0, max_score] = max_val + # Annotate the image + + cv2_im = cv2_img.msg_to_cv2_img(img_msg) + if annotate: + cv2.putText( + cv2_im, + f"Most likely caption: {answers[max_score]}", + (10, 30), + cv2.FONT_HERSHEY_SIMPLEX, + 0.5, + (0, 255, 0), + 2, + cv2.LINE_AA, + ) + # add second score below + cv2.putText( + cv2_im, + f"Second most likely caption: {answers[second_max_score]}", + (10, 60), + cv2.FONT_HERSHEY_SIMPLEX, + 0.5, + (0, 255, 0), + 2, + cv2.LINE_AA, + ) + + img = cv2_img.cv2_img_to_msg(cv2_im) + return answers[max_score], cos_scores[0, max_score], img diff --git a/common/vision/lasr_vision_deepface/examples/greet b/common/vision/lasr_vision_deepface/examples/greet index 63778c6af..70a87a038 100644 --- a/common/vision/lasr_vision_deepface/examples/greet +++ b/common/vision/lasr_vision_deepface/examples/greet @@ -7,51 +7,63 @@ from copy import deepcopy from sensor_msgs.msg import Image from lasr_vision_msgs.srv import Recognise, RecogniseRequest +from lasr_voice import Voice if len(sys.argv) < 3: - print('Usage: rosrun lase_recognition greet ') + print("Usage: rosrun lase_recognition greet ") exit() listen_topic = sys.argv[1] dataset = sys.argv[2] people_in_frame = [] -last_received_time = None + + +people_in_frame = {} def detect(image): rospy.loginfo("Received image message") global people_in_frame - people_in_frame = [] try: - detect_service = rospy.ServiceProxy('/recognise', Recognise) + detect_service = rospy.ServiceProxy("/recognise", Recognise) req = RecogniseRequest() req.image_raw = image req.dataset = dataset - req.confidence = 0.5 + req.confidence = 0.4 resp = detect_service(req) for detection in resp.detections: - people_in_frame.append(detection.name) - print(resp) + people_in_frame[detection.name] = rospy.Time.now() except rospy.ServiceException as e: rospy.logerr("Service call failed: %s" % e) + def greet(): - print(f"Hello, {' '.join(people_in_frame)}") + voice = Voice() + voice.speak(f"Hello, {' '.join(people_in_frame)}") + + def image_callback(image): - global last_received_time - if last_received_time is None or rospy.Time.now() - last_received_time >= rospy.Duration(5.0): - prev_people_in_frame = deepcopy(people_in_frame) - detect(image) - if people_in_frame != prev_people_in_frame: - greet() - last_received_time = rospy.Time.now() + global people_in_frame + prev_people_in_frame = list(people_in_frame.keys()) + # remove detections from people_in_frame that are older than 5 seconds long + detect(image) + for person in list(people_in_frame.keys()): + if rospy.Time.now() - people_in_frame[person] > rospy.Duration(10): + del people_in_frame[person] + if ( + list(people_in_frame.keys()) != prev_people_in_frame + and len(people_in_frame) > 0 + ) or (len(prev_people_in_frame) == 0 and len(people_in_frame) > 0): + greet() + def listener(): - rospy.init_node('image_listener', anonymous=True) - rospy.wait_for_service('/recognise') - rospy.Subscriber(listen_topic, Image, image_callback) + rospy.init_node("image_listener", anonymous=True) + rospy.wait_for_service("/recognise") + 
rospy.Subscriber(listen_topic, Image, image_callback, queue_size=1) rospy.spin() -if __name__ == '__main__': + +if __name__ == "__main__": listener() diff --git a/common/vision/lasr_vision_deepface/examples/relay b/common/vision/lasr_vision_deepface/examples/relay index af1c9bd4d..5c35ee887 100644 --- a/common/vision/lasr_vision_deepface/examples/relay +++ b/common/vision/lasr_vision_deepface/examples/relay @@ -28,7 +28,7 @@ def detect(image): req = RecogniseRequest() req.image_raw = image req.dataset = dataset - req.confidence = 0.7 + req.confidence = 0.4 resp = detect_service(req) print(resp) except rospy.ServiceException as e: diff --git a/common/vision/lasr_vision_deepface/launch/example.launch b/common/vision/lasr_vision_deepface/launch/example.launch new file mode 100644 index 000000000..5d3281867 --- /dev/null +++ b/common/vision/lasr_vision_deepface/launch/example.launch @@ -0,0 +1,11 @@ + + + + + + + + + + + \ No newline at end of file diff --git a/common/vision/lasr_vision_deepface/nodes/service b/common/vision/lasr_vision_deepface/nodes/service index 12149844c..d6a71384a 100644 --- a/common/vision/lasr_vision_deepface/nodes/service +++ b/common/vision/lasr_vision_deepface/nodes/service @@ -4,31 +4,86 @@ import re import rospy import lasr_vision_deepface as face_recognition from sensor_msgs.msg import Image -from lasr_vision_msgs.srv import Recognise, RecogniseRequest, RecogniseResponse +from lasr_vision_msgs.srv import ( + Recognise, + RecogniseRequest, + RecogniseResponse, + LearnFace, + LearnFaceRequest, + LearnFaceResponse, +) + rospy.init_node("recognise_service") # Determine variables DEBUG = rospy.get_param("~debug", False) -debug_publishers = {} +recognise_debug_publishers = {} +learn_face_debug_publishers = {} if DEBUG: - debug_publisher = rospy.Publisher("/recognise/debug", Image, queue_size=1) + recognise_debug_publisher = rospy.Publisher("/recognise/debug", Image, queue_size=1) + learn_face_debug_publisher = rospy.Publisher( + "/learn_face/debug", Image, queue_size=1 + ) + cropped_face_publisher = rospy.Publisher( + "/learn_face/debug/cropped_query_face", Image, queue_size=1 + ) def detect(request: RecogniseRequest) -> RecogniseResponse: debug_publisher = None + similar_face_debug_publisher = None + cropped_face_publisher = None if DEBUG: - if request.dataset in debug_publishers: - debug_publisher = debug_publishers[request.dataset] + if request.dataset in recognise_debug_publishers: + debug_publisher = recognise_debug_publishers[request.dataset][0] + similar_face_debug_publisher = recognise_debug_publishers[request.dataset][ + 1 + ] + cropped_face_publisher = recognise_debug_publisher[request.dataset][2] else: topic_name = re.sub(r"[\W_]+", "", request.dataset) debug_publisher = rospy.Publisher( f"/recognise/debug/{topic_name}", Image, queue_size=1 ) - return face_recognition.detect(request, debug_publisher) + similar_face_debug_publisher = rospy.Publisher( + f"/recognise/debug/{topic_name}/similar_face", Image, queue_size=1 + ) + cropped_face_publisher = rospy.Publisher( + "/recognise/debug/cropped_query_face", Image, queue_size=1 + ) + recognise_debug_publishers[request.dataset] = ( + debug_publisher, + similar_face_debug_publisher, + cropped_face_publisher, + ) + return face_recognition.detect( + request, debug_publisher, similar_face_debug_publisher, cropped_face_publisher + ) + + +def learn_face(request: LearnFaceRequest) -> LearnFaceResponse: + debug_publisher = None + if DEBUG: + if request.dataset in learn_face_debug_publishers: + debug_publisher = 
learn_face_debug_publishers[request.dataset] + else: + topic_name = re.sub(r"[\W_]+", "", request.dataset) + debug_publisher = rospy.Publisher( + f"/learn_face/debug/{topic_name}", Image, queue_size=1 + ) + face_recognition.create_dataset( + "/xtion/rgb/image_raw", + request.dataset, + request.name, + request.n_images, + debug_publisher, + ) + return LearnFaceResponse() rospy.Service("/recognise", Recognise, detect) -rospy.loginfo("Face Recognition service starter") +rospy.Service("/learn_face", LearnFace, learn_face) +rospy.loginfo("Face Recognition service started") rospy.spin() diff --git a/common/vision/lasr_vision_deepface/scripts/create_dataset b/common/vision/lasr_vision_deepface/scripts/create_dataset index 9796b5665..85f8e6b3c 100644 --- a/common/vision/lasr_vision_deepface/scripts/create_dataset +++ b/common/vision/lasr_vision_deepface/scripts/create_dataset @@ -19,27 +19,7 @@ else: size = 50 import rospy -import rospkg -from sensor_msgs.msg import Image -import os -import cv2_img -import cv2 - -DATASET_ROOT = os.path.join( - rospkg.RosPack().get_path("lasr_vision_deepface"), "datasets" -) -DATASET_PATH = os.path.join(DATASET_ROOT, dataset, name) -if not os.path.exists(DATASET_PATH): - os.makedirs(DATASET_PATH) rospy.init_node("create_dataset") -rospy.loginfo(f"Taking {size} pictures of {name} and saving to {DATASET_PATH}") -for i in range(size): - img_msg = rospy.wait_for_message(topic, Image) - cv_im = cv2_img.msg_to_cv2_img(img_msg) - face_cropped_cv_im = face_recognition.detect_face(cv_im) - if face_cropped_cv_im is None: - continue - cv2.imwrite(os.path.join(DATASET_PATH, f"{name}_{i+1}.png"), face_cropped_cv_im) - rospy.loginfo(f"Took picture {i+1}") +face_recognition.create_dataset(topic, dataset, name, size) diff --git a/common/vision/lasr_vision_deepface/src/lasr_vision_deepface/__init__.py b/common/vision/lasr_vision_deepface/src/lasr_vision_deepface/__init__.py index 5a5bb56d8..e69de29bb 100644 --- a/common/vision/lasr_vision_deepface/src/lasr_vision_deepface/__init__.py +++ b/common/vision/lasr_vision_deepface/src/lasr_vision_deepface/__init__.py @@ -1 +0,0 @@ -from .deepface import detect, detect_face diff --git a/common/vision/lasr_vision_deepface/src/lasr_vision_deepface/deepface.py b/common/vision/lasr_vision_deepface/src/lasr_vision_deepface/deepface.py index af8895dad..ce9c4771b 100644 --- a/common/vision/lasr_vision_deepface/src/lasr_vision_deepface/deepface.py +++ b/common/vision/lasr_vision_deepface/src/lasr_vision_deepface/deepface.py @@ -5,14 +5,19 @@ import rospkg import rospy import os +import numpy as np +import pandas as pd from lasr_vision_msgs.msg import Detection from lasr_vision_msgs.srv import RecogniseRequest, RecogniseResponse +from sensor_msgs.msg import Image + DATASET_ROOT = os.path.join( rospkg.RosPack().get_path("lasr_vision_deepface"), "datasets" ) + Mat = int # np.typing.NDArray[np.uint8] @@ -28,11 +33,90 @@ def detect_face(cv_im: Mat) -> Mat | None: return None facial_area = faces[0]["facial_area"] x, y, w, h = facial_area["x"], facial_area["y"], facial_area["w"], facial_area["h"] + + # add padding to the face + x -= 10 + y -= 10 + w += 20 + h += 20 + return cv_im[:][y : y + h, x : x + w] +def create_image_collage(images, output_size=(640, 480)): + + # Calculate grid dimensions + num_images = len(images) + rows = int(np.sqrt(num_images)) + print(num_images, rows) + cols = (num_images + rows - 1) // rows # Ceiling division + + # Resize images to fit in the grid + resized_images = [ + cv2.resize(img, (output_size[0] // cols, output_size[1] 
// rows)) + for img in images + ] + + # Create the final image grid + grid_image = np.zeros((output_size[1], output_size[0], 3), dtype=np.uint8) + + # Populate the grid with resized images + for i in range(rows): + for j in range(cols): + idx = i * cols + j + if idx < num_images: + y_start = i * (output_size[1] // rows) + y_end = (i + 1) * (output_size[1] // rows) + x_start = j * (output_size[0] // cols) + x_end = (j + 1) * (output_size[0] // cols) + + grid_image[y_start:y_end, x_start:x_end] = resized_images[idx] + + return grid_image + + +def create_dataset( + topic: str, + dataset: str, + name: str, + size: int, + debug_publisher: rospy.Publisher | None, +) -> None: + dataset_path = os.path.join(DATASET_ROOT, dataset, name) + if not os.path.exists(dataset_path): + os.makedirs(dataset_path) + rospy.loginfo(f"Taking {size} pictures of {name} and saving to {dataset_path}") + + images = [] + for i in range(size): + img_msg = rospy.wait_for_message(topic, Image) + cv_im = cv2_img.msg_to_cv2_img(img_msg) + face_cropped_cv_im = detect_face(cv_im) + if face_cropped_cv_im is None: + continue + cv2.imwrite(os.path.join(dataset_path, f"{name}_{i+1}.png"), face_cropped_cv_im) # type: ignore + rospy.loginfo(f"Took picture {i+1}") + images.append(face_cropped_cv_im) + if debug_publisher is not None: + debug_publisher.publish( + cv2_img.cv2_img_to_msg(create_image_collage(images)) + ) + + # Force retraining + DeepFace.find( + cv_im, + os.path.join(DATASET_ROOT, dataset), + enforce_detection=False, + silent=True, + detector_backend="mtcnn", + ) + + def detect( - request: RecogniseRequest, debug_publisher: rospy.Publisher | None + request: RecogniseRequest, + debug_publisher: rospy.Publisher | None, + debug_inference_pub: rospy.Publisher | None, + cropped_detect_pub: rospy.Publisher | None, ) -> RecogniseResponse: # Decode the image rospy.loginfo("Decoding") @@ -49,7 +133,6 @@ def detect( enforce_detection=True, silent=True, detector_backend="mtcnn", - threshold=request.confidence, ) except ValueError: return response @@ -66,14 +149,19 @@ def detect( row["source_h"][0], ) detection.xywh = [x, y, w, h] - detection.confidence = 1.0 - row["distance"][0] + detection.confidence = row["distance"][0] response.detections.append(detection) + cropped_image = cv_im[:][y : y + h, x : x + w] + + if cropped_detect_pub is not None: + cropped_detect_pub.publish(cv2_img.cv2_img_to_msg(cropped_image)) + # Draw bounding boxes and labels for debugging cv2.rectangle(cv_im, (x, y), (x + w, y + h), (0, 0, 255), 2) cv2.putText( cv_im, - f"{detection.name} ({detection.confidence})", + f"{detection.name} Distance: ({detection.confidence:.2f})", (x, y - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, @@ -84,5 +172,16 @@ def detect( # publish to debug topic if debug_publisher is not None: debug_publisher.publish(cv2_img.cv2_img_to_msg(cv_im)) + if debug_inference_pub is not None: + result = pd.concat(result) + # check for empty result + if not result.empty: + result_paths = list(result["identity"]) + if len(result_paths) > 5: + result_paths = result_paths[:5] + result_images = [cv2.imread(path) for path in result_paths] + debug_inference_pub.publish( + cv2_img.cv2_img_to_msg(create_image_collage(result_images)) + ) return response diff --git a/common/vision/lasr_vision_msgs/CMakeLists.txt b/common/vision/lasr_vision_msgs/CMakeLists.txt index 95ebfab57..06608a45c 100644 --- a/common/vision/lasr_vision_msgs/CMakeLists.txt +++ b/common/vision/lasr_vision_msgs/CMakeLists.txt @@ -46,6 +46,7 @@ find_package(catkin REQUIRED COMPONENTS 
message_generation message_runtime senso add_message_files( FILES Detection.msg + Detection3D.msg BodyPixPose.msg BodyPixMask.msg BodyPixMaskRequest.msg @@ -58,18 +59,18 @@ add_message_files( add_service_files( FILES YoloDetection.srv + YoloDetection3D.srv BodyPixDetection.srv - TorchFaceFeatureDetection.srv + # TorchFaceFeatureDetection.srv + Recognise.srv + LearnFace.srv + Vqa.srv + PointingDirection.srv TorchFaceFeatureDetectionDescription.srv # Recognise.srv ) -## Generate actions in the 'action' folder -# add_action_files( -# FILES -# Action1.action -# Action2.action -# ) +# Generate actions in the 'action' folder ## Generate added messages and services with any dependencies listed here generate_messages( diff --git a/common/vision/lasr_vision_msgs/msg/Detection3D.msg b/common/vision/lasr_vision_msgs/msg/Detection3D.msg new file mode 100644 index 000000000..de41b3451 --- /dev/null +++ b/common/vision/lasr_vision_msgs/msg/Detection3D.msg @@ -0,0 +1,18 @@ +# Detected Object Class +string name + +# How certain we are this is what we think it is +float32 confidence + +# Bounding box mask defined in pixel-space +int32[] xywh + +# Target frame +string target_frame + +# Segmentation mask defined in pixel-space +# +# This will be empty if a segmentation model was not used +int32[] xyseg + +geometry_msgs/Point point \ No newline at end of file diff --git a/common/vision/lasr_vision_msgs/srv/LearnFace.srv b/common/vision/lasr_vision_msgs/srv/LearnFace.srv new file mode 100644 index 000000000..5376e53d5 --- /dev/null +++ b/common/vision/lasr_vision_msgs/srv/LearnFace.srv @@ -0,0 +1,10 @@ +# Name to associate +string name + +# Dataset to add face to +string dataset + +# Number of images to take +int32 n_images + +--- diff --git a/common/vision/lasr_vision_msgs/srv/PointingDirection.srv b/common/vision/lasr_vision_msgs/srv/PointingDirection.srv new file mode 100644 index 000000000..416b86669 --- /dev/null +++ b/common/vision/lasr_vision_msgs/srv/PointingDirection.srv @@ -0,0 +1,5 @@ +# Image to run inference on + +sensor_msgs/Image image_raw +--- +string direction \ No newline at end of file diff --git a/common/vision/lasr_vision_msgs/srv/Vqa.srv b/common/vision/lasr_vision_msgs/srv/Vqa.srv new file mode 100644 index 000000000..2cb9a86f1 --- /dev/null +++ b/common/vision/lasr_vision_msgs/srv/Vqa.srv @@ -0,0 +1,9 @@ +string[] possible_answers + +--- + +# most likely answer +string answer + +# cosine similarity +float32 similarity \ No newline at end of file diff --git a/common/vision/lasr_vision_msgs/srv/YoloDetection3D.srv b/common/vision/lasr_vision_msgs/srv/YoloDetection3D.srv new file mode 100644 index 000000000..13e146fab --- /dev/null +++ b/common/vision/lasr_vision_msgs/srv/YoloDetection3D.srv @@ -0,0 +1,16 @@ +# PointCloud2 to run inference on +sensor_msgs/PointCloud2 pcl + +# YOLO model to use +string dataset + +# How certain the detection should be to include +float32 confidence + +# Non-maximum Supression +# +# Guiding value to remove overlapping bounding boxes +float32 nms +--- +# Detection result +lasr_vision_msgs/Detection3D[] detected_objects \ No newline at end of file diff --git a/legacy/cv_bridge3/CMakeLists.txt b/common/vision/lasr_vision_pointing/CMakeLists.txt similarity index 90% rename from legacy/cv_bridge3/CMakeLists.txt rename to common/vision/lasr_vision_pointing/CMakeLists.txt index 05f19fb93..66f1cf831 100644 --- a/legacy/cv_bridge3/CMakeLists.txt +++ b/common/vision/lasr_vision_pointing/CMakeLists.txt @@ -1,5 +1,5 @@ cmake_minimum_required(VERSION 3.0.2) 
-project(cv_bridge3) +project(lasr_vision_pointing) ## Compile as C++11, supported in ROS Kinetic and newer # add_compile_options(-std=c++11) @@ -7,7 +7,14 @@ project(cv_bridge3) ## Find catkin macros and libraries ## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz) ## is used, also find other catkin packages -find_package(catkin REQUIRED catkin_virtualenv) +find_package(catkin REQUIRED COMPONENTS + geometry_msgs + sensor_msgs + roscpp + rospy + std_msgs + catkin_virtualenv +) ## System dependencies are found with CMake's conventions # find_package(Boost REQUIRED COMPONENTS system) @@ -56,8 +63,7 @@ catkin_generate_virtualenv( ## Generate services in the 'srv' folder # add_service_files( # FILES -# Service1.srv -# Service2.srv +# PointingDirection.srv # ) ## Generate actions in the 'action' folder @@ -70,7 +76,9 @@ catkin_generate_virtualenv( ## Generate added messages and services with any dependencies listed here # generate_messages( # DEPENDENCIES -# std_msgs # Or other packages containing msgs +# geometry_msgs +# std_msgs +# sensor_msgs # ) ################################################ @@ -104,8 +112,8 @@ catkin_generate_virtualenv( ## DEPENDS: system dependencies of this project that dependent projects also need catkin_package( # INCLUDE_DIRS include -# LIBRARIES cv_bridge3 -# CATKIN_DEPENDS other_catkin_pkg +# LIBRARIES lasr_vision_pointing + CATKIN_DEPENDS geometry_msgs roscpp rospy std_msgs sensor_msgs # DEPENDS system_lib ) @@ -117,12 +125,12 @@ catkin_package( ## Your package locations should be listed before other locations include_directories( # include -# ${catkin_INCLUDE_DIRS} + ${catkin_INCLUDE_DIRS} ) ## Declare a C++ library # add_library(${PROJECT_NAME} -# src/${PROJECT_NAME}/cv_bridge3.cpp +# src/${PROJECT_NAME}/lasr_vision_pointing.cpp # ) ## Add cmake target dependencies of the library @@ -133,7 +141,7 @@ include_directories( ## Declare a C++ executable ## With catkin_make all packages are built within a single CMake context ## The recommended prefix ensures that target names across packages don't collide -# add_executable(${PROJECT_NAME}_node src/cv_bridge3_node.cpp) +# add_executable(${PROJECT_NAME}_node src/lasr_vision_pointing_node.cpp) ## Rename C++ executable without prefix ## The above recommended prefix causes long target names, the following renames the @@ -159,10 +167,10 @@ include_directories( ## Mark executable scripts (Python etc.) for installation ## in contrast to setup.py, you can choose the destination -# catkin_install_python(PROGRAMS -# scripts/my_python_script -# DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} -# ) +catkin_install_python(PROGRAMS + scripts/detect_pointing.py + DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} +) ## Mark executables for installation ## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_executables.html @@ -187,7 +195,7 @@ include_directories( ## Mark other files for installation (e.g. launch and bag files, etc.) 
install(FILES - requirements.txt + requirements.txt DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION} ) @@ -196,7 +204,7 @@ install(FILES ############# ## Add gtest based cpp test target and link libraries -# catkin_add_gtest(${PROJECT_NAME}-test test/test_cv_bridge3.cpp) +# catkin_add_gtest(${PROJECT_NAME}-test test/test_lasr_vision_pointing.cpp) # if(TARGET ${PROJECT_NAME}-test) # target_link_libraries(${PROJECT_NAME}-test ${PROJECT_NAME}) # endif() diff --git a/legacy/lasr_dialogflow/src/lasr_dialogflow/__init__.py b/common/vision/lasr_vision_pointing/images/.gitkeep similarity index 100% rename from legacy/lasr_dialogflow/src/lasr_dialogflow/__init__.py rename to common/vision/lasr_vision_pointing/images/.gitkeep diff --git a/common/helpers/tf_module/package.xml b/common/vision/lasr_vision_pointing/package.xml similarity index 87% rename from common/helpers/tf_module/package.xml rename to common/vision/lasr_vision_pointing/package.xml index ef34656ee..d99ef49f8 100644 --- a/common/helpers/tf_module/package.xml +++ b/common/vision/lasr_vision_pointing/package.xml @@ -1,13 +1,13 @@ - tf_module + lasr_vision_pointing 0.0.0 - The tf_module package + The lasr_vision_pointing package - nicole + Serge Alkhalil @@ -19,7 +19,7 @@ - + @@ -48,29 +48,30 @@ - + catkin geometry_msgs - message_generation roscpp rospy - sensor_msgs std_msgs + sensor_msgs geometry_msgs roscpp rospy - sensor_msgs std_msgs geometry_msgs roscpp rospy - sensor_msgs std_msgs - catkin + sensor_msgs + lasr_vision_msgs + + catkin_virtualenv + requirements.txt diff --git a/common/vision/lasr_vision_pointing/requirements.in b/common/vision/lasr_vision_pointing/requirements.in new file mode 100644 index 000000000..c1b708709 --- /dev/null +++ b/common/vision/lasr_vision_pointing/requirements.in @@ -0,0 +1 @@ +mediapipe==0.10.10 \ No newline at end of file diff --git a/common/vision/lasr_vision_pointing/requirements.txt b/common/vision/lasr_vision_pointing/requirements.txt new file mode 100644 index 000000000..f3bb12a4b --- /dev/null +++ b/common/vision/lasr_vision_pointing/requirements.txt @@ -0,0 +1,24 @@ +absl-py==2.1.0 # via mediapipe +attrs==23.2.0 # via mediapipe +cffi==1.16.0 # via sounddevice +contourpy==1.2.0 # via matplotlib +cycler==0.12.1 # via matplotlib +flatbuffers==24.3.7 # via mediapipe +fonttools==4.50.0 # via matplotlib +jax==0.4.25 # via mediapipe +kiwisolver==1.4.5 # via matplotlib +matplotlib==3.8.3 # via mediapipe +mediapipe==0.10.10 # via -r requirements.in +ml-dtypes==0.3.2 # via jax +numpy==1.26.4 # via contourpy, jax, matplotlib, mediapipe, ml-dtypes, opencv-contrib-python, opt-einsum, scipy +opencv-contrib-python==4.9.0.80 # via mediapipe +opt-einsum==3.3.0 # via jax +packaging==24.0 # via matplotlib +pillow==10.2.0 # via matplotlib +protobuf==3.20.3 # via mediapipe +pycparser==2.21 # via cffi +pyparsing==3.1.2 # via matplotlib +python-dateutil==2.9.0.post0 # via matplotlib +scipy==1.12.0 # via jax +six==1.16.0 # via python-dateutil +sounddevice==0.4.6 # via mediapipe diff --git a/common/vision/lasr_vision_pointing/scripts/detect_pointing.py b/common/vision/lasr_vision_pointing/scripts/detect_pointing.py new file mode 100644 index 000000000..a3f1d64d4 --- /dev/null +++ b/common/vision/lasr_vision_pointing/scripts/detect_pointing.py @@ -0,0 +1,147 @@ +#!/usr/bin/env python3 + +import os +import cv2 +import rospkg +import rospy +import mediapipe as mp +from cv_bridge import CvBridge +from lasr_vision_msgs.srv import YoloDetectionRequest, YoloDetection, PointingDirection, PointingDirectionResponse, 
PointingDirectionRequest + +class PointingDetector: + def __init__(self): + self.detect_service = rospy.ServiceProxy('/yolov8/detect', YoloDetection) + self.service = rospy.Service('/pointing_detection_service', PointingDirection, self.execute) + self.counter = 0 + + # Load MediaPipe Pose model + self.mp_pose = mp.solutions.pose + self.pose = self.mp_pose.Pose() + + def execute(self, req: PointingDirectionRequest) -> PointingDirectionResponse: + img = req.image_raw + resp = PointingDirectionResponse() + people_bounding_box = self.detection(img) + + if people_bounding_box: + for person in people_bounding_box: + keypoints = self.detect_keypoints(img) # Detect keypoints using MediaPipe + direction = self.determine_pointing_direction(keypoints) + rospy.loginfo(f"Person detected pointing: {direction}") + + # Visualize pointing direction with landmarks + image_path = os.path.join(rospkg.RosPack().get_path('lasr_vision_pointing'), 'images', f'image{self.counter}.jpg') + self.visualize_pointing_direction_with_landmarks(image_path, person, direction, keypoints) + + resp.direction = direction + else: + resp.direction = "Err" + + self.counter += 1 + return resp + + def store_image(self, img): + package_path = rospkg.RosPack().get_path('lasr_vision_pointing') + os.chdir(os.path.abspath(os.path.join(package_path, 'images'))) + cv2.imwrite(f"image{self.counter}.jpg", img) + + def detect_keypoints(self, img): + img_bridge = CvBridge().imgmsg_to_cv2(img, desired_encoding="passthrough") + img_rgb = cv2.cvtColor(img_bridge, cv2.COLOR_BGR2RGB) + + # store the image + self.store_image(img_rgb) + + results = self.pose.process(img_rgb) + + keypoints = [] + if results.pose_landmarks: + for landmark in results.pose_landmarks.landmark: + x = int(landmark.x * img.width) + y = int(landmark.y * img.height) + keypoints.append((x, y)) + + return keypoints + + def detection(self, img): + request = YoloDetectionRequest() + request.image_raw = img # sensor_msgs/Image + request.dataset = "yolov8n-seg.pt" # YOLOv8 model, auto-downloads + request.confidence = 0.7 # minimum confidence to include in results + request.nms = 0.4 # non-maximum suppression + + # send request + response = self.detect_service(request) + + result = [] + for detection in response.detected_objects: + if detection.name == "person": + # coords of person in image + result.append(detection.xywh) + + return result + + def determine_pointing_direction(self, keypoints, buffer_width=50): + # Ensure keypoints are available + if len(keypoints) >= 17: # Need landmarks up to the wrists (indices 15 and 16) + # Extract relevant keypoints for shoulders and wrists + left_shoulder = keypoints[self.mp_pose.PoseLandmark.LEFT_SHOULDER.value] + right_shoulder = keypoints[self.mp_pose.PoseLandmark.RIGHT_SHOULDER.value] + left_wrist = keypoints[self.mp_pose.PoseLandmark.LEFT_WRIST.value] + right_wrist = keypoints[self.mp_pose.PoseLandmark.RIGHT_WRIST.value] + + # Ensure all keypoints are detected + if left_shoulder and right_shoulder and left_wrist and right_wrist: + # Calculate the x-coordinate difference between shoulders and wrists + left_diff = left_wrist[0] - left_shoulder[0] + right_diff = right_shoulder[0] - right_wrist[0] + + # Determine pointing direction based on the difference in x-coordinates + if abs(left_diff - right_diff) < buffer_width: + return "Front" + elif abs(left_diff) > buffer_width and abs(left_diff) > abs(right_diff): + return "Left" if left_diff > 0 else "Right" + elif abs(right_diff) > buffer_width and abs(right_diff) > abs(left_diff): + return
"Right" if right_diff > 0 else "Left" + + # Default: Determine direction based on the relative position to the center of the image + return "Front" + + def visualize_pointing_direction_with_landmarks(self, image_path, person_bbox, pointing_direction, keypoints): + # Load image + img = cv2.imread(image_path) + + # Extract person bbox coordinates + x, y, w, h = person_bbox + # Calculate center of bbox + center_x = x + w // 2 + center_y = y + h // 2 + + # Calculate endpoint of arrow based on pointing direction + arrow_length = min(w, h) // 2 + if pointing_direction == "Left": + endpoint = (center_x - arrow_length, center_y) + elif pointing_direction == "Right": + endpoint = (center_x + arrow_length, center_y) + else: + endpoint = (center_x, center_y) + + # Draw arrow on image + color = (0, 255, 0) # Green color + thickness = 2 + cv2.arrowedLine(img, (center_x, center_y), endpoint, color, thickness) + + # Draw landmarks (keypoints) on the image + for keypoint in keypoints: + if keypoint: + cv2.circle(img, keypoint, 3, (0, 0, 255), -1) # Red color for landmarks + + # Store image with pointing direction visualization and landmarks + output_image_path = f"landmark_image{self.counter}.jpg" # Change the output image path as needed + cv2.imwrite(output_image_path, img) + +if __name__ == '__main__': + rospy.init_node("pointing_detector") + pointer = PointingDetector() + rospy.loginfo("Pointing Detector is running") + rospy.spin() \ No newline at end of file diff --git a/legacy/lasr_dialogflow/setup.py b/common/vision/lasr_vision_pointing/setup.py similarity index 84% rename from legacy/lasr_dialogflow/setup.py rename to common/vision/lasr_vision_pointing/setup.py index 3d36d5694..627e86f08 100644 --- a/legacy/lasr_dialogflow/setup.py +++ b/common/vision/lasr_vision_pointing/setup.py @@ -4,7 +4,7 @@ from catkin_pkg.python_setup import generate_distutils_setup setup_args = generate_distutils_setup( - packages=['lasr_dialogflow'], + packages=['lasr_vision_pointing'], package_dir={'': 'src'} ) diff --git a/common/vision/lasr_vision_yolov8/CMakeLists.txt b/common/vision/lasr_vision_yolov8/CMakeLists.txt index 0ab61134d..e7599fef7 100644 --- a/common/vision/lasr_vision_yolov8/CMakeLists.txt +++ b/common/vision/lasr_vision_yolov8/CMakeLists.txt @@ -19,7 +19,7 @@ find_package(catkin REQUIRED catkin_virtualenv) catkin_python_setup() catkin_generate_virtualenv( INPUT_REQUIREMENTS requirements.in - PYTHON_INTERPRETER python3.10 + PYTHON_INTERPRETER python3.8 ) ################################################ @@ -162,6 +162,7 @@ include_directories( catkin_install_python(PROGRAMS nodes/service examples/relay + examples/relay_3d examples/construct_mask DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} ) diff --git a/common/vision/lasr_vision_yolov8/examples/relay_3d b/common/vision/lasr_vision_yolov8/examples/relay_3d new file mode 100644 index 000000000..1ffc9f758 --- /dev/null +++ b/common/vision/lasr_vision_yolov8/examples/relay_3d @@ -0,0 +1,56 @@ +#!/usr/bin/env python3 + +import sys +import rospy +import threading + +from sensor_msgs.msg import PointCloud2 +from lasr_vision_msgs.srv import YoloDetection3D, YoloDetection3DRequest + +# figure out what model we are using +if len(sys.argv) >= 2: + model = sys.argv[1] +else: + model = "yolov8n-seg.pt" + +processing = False + + +def detect(pcl): + global processing + processing = True + rospy.loginfo("Received pcl message") + + try: + detect_service = rospy.ServiceProxy("/yolov8/detect3d", YoloDetection3D) + req = YoloDetection3DRequest() + req.pcl = pcl + 
req.dataset = model + req.confidence = 0.25 + req.nms = 0.4 + resp = detect_service(req) + print(resp) + except rospy.ServiceException as e: + rospy.logerr("Service call failed: %s" % e) + finally: + processing = False + + +def pcl_callback(pcl): + global processing + if processing: + return + + t = threading.Thread(target=detect, args=(pcl,)) + t.start() + + +def listener(): + rospy.init_node("pcl_listener", anonymous=True) + rospy.wait_for_service("/yolov8/detect3d") + rospy.Subscriber("/xtion/depth_registered/points", PointCloud2, pcl_callback) + rospy.spin() + + +if __name__ == "__main__": + listener() \ No newline at end of file diff --git a/common/vision/lasr_vision_yolov8/nodes/service b/common/vision/lasr_vision_yolov8/nodes/service index 301f1b9c3..b949873b3 100644 --- a/common/vision/lasr_vision_yolov8/nodes/service +++ b/common/vision/lasr_vision_yolov8/nodes/service @@ -6,21 +6,31 @@ import rospkg import lasr_vision_yolov8 as yolo from sensor_msgs.msg import Image -from lasr_vision_msgs.srv import YoloDetection, YoloDetectionRequest, YoloDetectionResponse +from visualization_msgs.msg import Marker + +from lasr_vision_msgs.srv import ( + YoloDetection, + YoloDetectionRequest, + YoloDetectionResponse, + YoloDetection3D, + YoloDetection3DRequest, + YoloDetection3DResponse, +) # Put ourselves in the model folder import os import rospkg + rp = rospkg.RosPack() -package_path = rp.get_path('lasr_vision_yolov8') -os.chdir(os.path.abspath(os.path.join(package_path, 'models'))) +package_path = rp.get_path("lasr_vision_yolov8") +os.chdir(os.path.abspath(os.path.join(package_path, "models"))) # Initialise rospy -rospy.init_node('yolov8_service') +rospy.init_node("yolov8_service") # Determine variables -DEBUG = rospy.get_param('~debug', False) -PRELOAD = rospy.get_param('~preload', []) +DEBUG = rospy.get_param("~debug", False) +PRELOAD = rospy.get_param("~preload", []) for model in PRELOAD: yolo.load_model(model) @@ -28,21 +38,49 @@ for model in PRELOAD: # Prepare publisher debug_publishers = {} if DEBUG: - debug_publisher = rospy.Publisher('/yolov8/debug', Image, queue_size=1) + debug_publisher = rospy.Publisher("/yolov8/debug", Image, queue_size=1) + def detect(request: YoloDetectionRequest) -> YoloDetectionResponse: - ''' + """ Hand off detection request to yolo library - ''' + """ debug_publisher = None if DEBUG: if request.dataset in debug_publishers: debug_publisher = debug_publishers[request.dataset] else: - topic_name = re.sub(r'[\W_]+', '', request.dataset) - debug_publisher = rospy.Publisher(f'/yolov8/debug/{topic_name}', Image, queue_size=1) + topic_name = re.sub(r"[\W_]+", "", request.dataset) + debug_publisher = rospy.Publisher( + f"/yolov8/debug/{topic_name}", Image, queue_size=1 + ) return yolo.detect(request, debug_publisher) -rospy.Service('/yolov8/detect', YoloDetection, detect) -rospy.loginfo('YOLOv8 service started') + +def detect_3d(request: YoloDetection3DRequest) -> YoloDetection3DResponse: + """ + Hand off detection request to yolo library + """ + debug_inference_publisher, debug_point_publisher = None, None + if DEBUG: + if request.dataset in debug_publishers: + debug_inference_publisher, debug_point_publisher = debug_publishers[ + request.dataset + ] + else: + topic_name = re.sub(r"[\W_]+", "", request.dataset) + debug_inference_publisher = rospy.Publisher( + f"/yolov8/debug/{topic_name}", Image, queue_size=1 + ) + debug_point_publisher = rospy.Publisher( + f"/yolov8/debug/points", Marker, queue_size=10 + ) + + return yolo.detect_3d(request, 
debug_inference_publisher, debug_point_publisher) + + +yolo.start_tf_buffer() +rospy.Service("/yolov8/detect", YoloDetection, detect) +rospy.Service("/yolov8/detect3d", YoloDetection3D, detect_3d) +rospy.loginfo("YOLOv8 service started") rospy.spin() diff --git a/common/vision/lasr_vision_yolov8/package.xml b/common/vision/lasr_vision_yolov8/package.xml index aeb1be4d2..60276d450 100644 --- a/common/vision/lasr_vision_yolov8/package.xml +++ b/common/vision/lasr_vision_yolov8/package.xml @@ -51,7 +51,9 @@ catkin catkin_virtualenv lasr_vision_msgs - + markers + cv2_img + cv2_pcl diff --git a/common/vision/lasr_vision_yolov8/requirements.txt b/common/vision/lasr_vision_yolov8/requirements.txt index 99f31cbbf..e25cdeb1e 100644 --- a/common/vision/lasr_vision_yolov8/requirements.txt +++ b/common/vision/lasr_vision_yolov8/requirements.txt @@ -1,55 +1,53 @@ -certifi==2023.7.22 # via requests -charset-normalizer==3.2.0 # via requests -cmake==3.27.4.1 # via triton -contourpy==1.1.0 # via matplotlib -cycler==0.11.0 # via matplotlib +certifi==2024.2.2 # via requests +charset-normalizer==3.3.2 # via requests +contourpy==1.1.1 # via matplotlib +cycler==0.12.1 # via matplotlib dill==0.3.7 # via -r requirements.in -filelock==3.12.3 # via torch, triton -fonttools==4.42.1 # via matplotlib -idna==3.4 # via requests -jinja2==3.1.2 # via torch +filelock==3.13.1 # via torch, triton +fonttools==4.49.0 # via matplotlib +fsspec==2024.2.0 # via torch +idna==3.6 # via requests +importlib-resources==6.1.2 # via matplotlib +jinja2==3.1.3 # via torch kiwisolver==1.4.5 # via matplotlib -lit==16.0.6 # via triton -markupsafe==2.1.3 # via jinja2 -matplotlib==3.7.3 # via seaborn, ultralytics +markupsafe==2.1.5 # via jinja2 +matplotlib==3.7.5 # via seaborn, ultralytics mpmath==1.3.0 # via sympy networkx==3.1 # via torch -numpy==1.25.2 # via contourpy, matplotlib, opencv-python, pandas, scipy, seaborn, torchvision, ultralytics -nvidia-cublas-cu11==11.10.3.66 # via nvidia-cudnn-cu11, nvidia-cusolver-cu11, torch -nvidia-cuda-cupti-cu11==11.7.101 # via torch -nvidia-cuda-nvrtc-cu11==11.7.99 # via torch -nvidia-cuda-runtime-cu11==11.7.99 # via torch -nvidia-cudnn-cu11==8.5.0.96 # via torch -nvidia-cufft-cu11==10.9.0.58 # via torch -nvidia-curand-cu11==10.2.10.91 # via torch -nvidia-cusolver-cu11==11.4.0.1 # via torch -nvidia-cusparse-cu11==11.7.4.91 # via torch -nvidia-nccl-cu11==2.14.3 # via torch -nvidia-nvtx-cu11==11.7.91 # via torch -opencv-python==4.8.0.76 # via ultralytics -packaging==23.1 # via matplotlib -pandas==2.1.0 # via seaborn, ultralytics -pillow==10.0.0 # via matplotlib, torchvision, ultralytics -psutil==5.9.5 # via ultralytics +numpy==1.24.4 # via contourpy, matplotlib, opencv-python, pandas, scipy, seaborn, torchvision, ultralytics +nvidia-cublas-cu12==12.1.3.1 # via nvidia-cudnn-cu12, nvidia-cusolver-cu12, torch +nvidia-cuda-cupti-cu12==12.1.105 # via torch +nvidia-cuda-nvrtc-cu12==12.1.105 # via torch +nvidia-cuda-runtime-cu12==12.1.105 # via torch +nvidia-cudnn-cu12==8.9.2.26 # via torch +nvidia-cufft-cu12==11.0.2.54 # via torch +nvidia-curand-cu12==10.3.2.106 # via torch +nvidia-cusolver-cu12==11.4.5.107 # via torch +nvidia-cusparse-cu12==12.1.0.106 # via nvidia-cusolver-cu12, torch +nvidia-nccl-cu12==2.19.3 # via torch +nvidia-nvjitlink-cu12==12.4.99 # via nvidia-cusolver-cu12, nvidia-cusparse-cu12 +nvidia-nvtx-cu12==12.1.105 # via torch +opencv-python==4.9.0.80 # via ultralytics +packaging==23.2 # via matplotlib +pandas==2.0.3 # via seaborn, ultralytics +pillow==10.2.0 # via matplotlib, torchvision, 
ultralytics +psutil==5.9.8 # via ultralytics py-cpuinfo==9.0.0 # via ultralytics -pyparsing==3.1.1 # via matplotlib -python-dateutil==2.8.2 # via matplotlib, pandas -pytz==2023.3.post1 # via pandas +pyparsing==3.1.2 # via matplotlib +python-dateutil==2.9.0.post0 # via matplotlib, pandas +pytz==2024.1 # via pandas pyyaml==6.0.1 # via ultralytics -requests==2.31.0 # via torchvision, ultralytics -scipy==1.11.2 # via ultralytics -seaborn==0.12.2 # via ultralytics +requests==2.31.0 # via ultralytics +scipy==1.10.1 # via ultralytics +seaborn==0.13.2 # via ultralytics six==1.16.0 # via python-dateutil sympy==1.12 # via torch -torch==2.0.1 # via torchvision, triton, ultralytics -torchvision==0.15.2 # via ultralytics -tqdm==4.66.1 # via ultralytics -triton==2.0.0 # via torch -typing-extensions==4.7.1 # via filelock, torch -tzdata==2023.3 # via pandas +torch==2.2.1 # via torchvision, ultralytics +torchvision==0.17.1 # via ultralytics +tqdm==4.66.2 # via ultralytics +triton==2.2.0 # via torch +typing-extensions==4.10.0 # via torch +tzdata==2024.1 # via pandas ultralytics==8.0.168 # via -r requirements.in -urllib3==2.0.4 # via requests -wheel==0.41.2 # via nvidia-cublas-cu11, nvidia-cuda-cupti-cu11, nvidia-cuda-runtime-cu11, nvidia-curand-cu11, nvidia-cusparse-cu11, nvidia-nvtx-cu11 - -# The following packages are considered to be unsafe in a requirements file: -# setuptools +urllib3==2.2.1 # via requests +zipp==3.17.0 # via importlib-resources diff --git a/common/vision/lasr_vision_yolov8/src/lasr_vision_yolov8/__init__.py b/common/vision/lasr_vision_yolov8/src/lasr_vision_yolov8/__init__.py index 7cc994930..2bb3418c5 100644 --- a/common/vision/lasr_vision_yolov8/src/lasr_vision_yolov8/__init__.py +++ b/common/vision/lasr_vision_yolov8/src/lasr_vision_yolov8/__init__.py @@ -1 +1 @@ -from .yolo import detect, load_model +from .yolo import detect, detect_3d, load_model, start_tf_buffer diff --git a/common/vision/lasr_vision_yolov8/src/lasr_vision_yolov8/yolo.py b/common/vision/lasr_vision_yolov8/src/lasr_vision_yolov8/yolo.py index d197c28ab..342223979 100644 --- a/common/vision/lasr_vision_yolov8/src/lasr_vision_yolov8/yolo.py +++ b/common/vision/lasr_vision_yolov8/src/lasr_vision_yolov8/yolo.py @@ -1,51 +1,74 @@ import rospy import cv2_img +import cv2_pcl import numpy as np -from PIL import Image from ultralytics import YOLO -from sensor_msgs.msg import Image as SensorImage -from lasr_vision_msgs.msg import Detection -from lasr_vision_msgs.srv import YoloDetectionRequest, YoloDetectionResponse +from lasr_vision_msgs.msg import Detection, Detection3D +from lasr_vision_msgs.srv import ( + YoloDetectionRequest, + YoloDetectionResponse, + YoloDetection3DRequest, + YoloDetection3DResponse, +) + +import markers + +from geometry_msgs.msg import Point, PointStamped + +import tf2_ros as tf +import tf2_geometry_msgs # noqa + +# global tf buffer +tf_buffer = tf.Buffer(cache_time=rospy.Duration(10)) + + +def start_tf_buffer() -> None: + tf.TransformListener(tf_buffer) + # model cache loaded_models = {} + def load_model(dataset: str) -> None: - ''' + """ Load a model into cache - ''' + """ model = None if dataset in loaded_models: model = loaded_models[dataset] else: model = YOLO(dataset) - rospy.loginfo(f'Loaded {dataset} model') + rospy.loginfo(f"Loaded {dataset} model") loaded_models[dataset] = model - + return model -def detect(request: YoloDetectionRequest, debug_publisher: rospy.Publisher | None) -> YoloDetectionResponse: - ''' + +def detect( + request: YoloDetectionRequest, debug_publisher: rospy.Publisher 
+) -> YoloDetectionResponse: + """ Run YOLO inference on given detection request - ''' + """ # decode the image - rospy.loginfo('Decoding') + rospy.loginfo("Decoding") img = cv2_img.msg_to_pillow_img(request.image_raw) - + # load model - rospy.loginfo('Loading model') + rospy.loginfo("Loading model") model = load_model(request.dataset) # run inference - rospy.loginfo('Running inference') + rospy.loginfo("Running inference") results = model(img, conf=request.confidence, iou=request.nms) result = results[0] - rospy.loginfo('Inference complete') + rospy.loginfo("Inference complete") # construct response detected_objects = [] @@ -62,11 +85,80 @@ def detect(request: YoloDetectionRequest, debug_publisher: rospy.Publisher | Non detection.xyseg = result.masks.xy[i].flatten().astype(int).tolist() detected_objects.append(detection) - + # publish to debug topic if debug_publisher is not None: debug_publisher.publish(cv2_img.cv2_img_to_msg(result.plot())) - + response = YoloDetectionResponse() response.detected_objects = detected_objects return response + + +def detect_3d( + request: YoloDetection3DRequest, + debug_inference_publisher: rospy.Publisher, + debug_point_publisher: rospy.Publisher, +) -> YoloDetection3DResponse: + """ + Run YOLO 3D inference on given detection request + """ + + # Extract rgb image from pointcloud + rospy.loginfo("Decoding") + img = cv2_pcl.pcl_to_cv2(request.pcl) + + # load model + rospy.loginfo("Loading model") + model = load_model(request.dataset) + + # run inference + rospy.loginfo("Running inference") + results = model(img, conf=request.confidence, iou=request.nms) + result = results[0] + rospy.loginfo("Inference complete") + + # construct response + detected_objects = [] + object_count = result.boxes.cls.size(dim=0) + has_segment_masks = result.masks is not None + for i in range(0, object_count): + detection = Detection3D() + detection.name = result.names[int(result.boxes.cls[i])] + detection.confidence = float(result.boxes.conf.cpu().numpy()[i]) + detection.xywh = result.boxes.xywh[i].cpu().numpy().astype(int).tolist() + + # copy segmented mask if available, and estimate 3D position + if has_segment_masks: + detection.xyseg = result.masks.xy[i].flatten().astype(int).tolist() + + centroid = cv2_pcl.seg_to_centroid(request.pcl, np.array(detection.xyseg)) + + point_stamped = PointStamped() + point_stamped.point = Point(*centroid) + point_stamped.header.frame_id = request.pcl.header.frame_id + point_stamped.header.stamp = rospy.Time(0) + + # TODO: handle tf errors properly + while not rospy.is_shutdown(): + try: + point_stamped = tf_buffer.transform(point_stamped, "map") + detection.point = point_stamped.point + break + except Exception as e: + rospy.logerr(e) + continue + + # publish to debug topic + if debug_point_publisher is not None: + markers.create_and_publish_marker(debug_point_publisher, point_stamped) + + detected_objects.append(detection) + + # publish to debug topic + if debug_inference_publisher is not None: + debug_inference_publisher.publish(cv2_img.cv2_img_to_msg(result.plot())) + + response = YoloDetection3DResponse() + response.detected_objects = detected_objects + return response diff --git a/legacy/lasr_navigate_to_known_person/src/lasr_navigate_to_known_person/__init__.py b/legacy/aruco_service/CATKIN_IGNORE similarity index 100% rename from legacy/lasr_navigate_to_known_person/src/lasr_navigate_to_known_person/__init__.py rename to legacy/aruco_service/CATKIN_IGNORE diff --git a/legacy/lasr_object_detection_yolo/__init__.py 
b/legacy/choosing_wait_position/CATKIN_IGNORE similarity index 100% rename from legacy/lasr_object_detection_yolo/__init__.py rename to legacy/choosing_wait_position/CATKIN_IGNORE diff --git a/legacy/common_math/CMakeLists.txt b/legacy/common_math/CMakeLists.txt deleted file mode 100644 index 934b0ad8d..000000000 --- a/legacy/common_math/CMakeLists.txt +++ /dev/null @@ -1,212 +0,0 @@ -cmake_minimum_required(VERSION 3.0.2) -project(common_math) - -## Compile as C++11, supported in ROS Kinetic and newer -# add_compile_options(-std=c++11) - -## Find catkin macros and libraries -## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz) -## is used, also find other catkin packages -find_package(catkin REQUIRED - actionlib - actionlib_msgs - geometry_msgs - message_generation - rospy - std_msgs - std_srvs - sensor_msgs -) - -## System dependencies are found with CMake's conventions -# find_package(Boost REQUIRED COMPONENTS system) - - -## Uncomment this if the package has a setup.py. This macro ensures -## modules and global scripts declared therein get installed -## See http://ros.org/doc/api/catkin/html/user_guide/setup_dot_py.html - catkin_python_setup() - -################################################ -## Declare ROS messages, services and actions ## -################################################ - -## To declare and build messages, services or actions from within this -## package, follow these steps: -## * Let MSG_DEP_SET be the set of packages whose message types you use in -## your messages/services/actions (e.g. std_msgs, actionlib_msgs, ...). -## * In the file package.xml: -## * add a build_depend tag for "message_generation" -## * add a build_depend and a exec_depend tag for each package in MSG_DEP_SET -## * If MSG_DEP_SET isn't empty the following dependency has been pulled in -## but can be declared for certainty nonetheless: -## * add a exec_depend tag for "message_runtime" -## * In this file (CMakeLists.txt): -## * add "message_generation" and every package in MSG_DEP_SET to -## find_package(catkin REQUIRED COMPONENTS ...) -## * add "message_runtime" and every package in MSG_DEP_SET to -## catkin_package(CATKIN_DEPENDS ...) -## * uncomment the add_*_files sections below as needed -## and list every .msg/.srv/.action file to be processed -## * uncomment the generate_messages entry below -## * add every package in MSG_DEP_SET to generate_messages(DEPENDENCIES ...) - -## Generate messages in the 'msg' folder -# add_message_files( -# FILES -# Message1.msg -# Message2.msg -# ) - -## Generate services in the 'srv' folder -add_service_files( - FILES - TfTransform.srv -) - -## Generate actions in the 'action' folder -# add_action_files( -# FILES -# Action1.action -# Action2.action -# ) - -## Generate added messages and services with any dependencies listed here -generate_messages(DEPENDENCIES - actionlib_msgs - geometry_msgs - sensor_msgs - std_msgs -) - -################################################ -## Declare ROS dynamic reconfigure parameters ## -################################################ - -## To declare and build dynamic reconfigure parameters within this -## package, follow these steps: -## * In the file package.xml: -## * add a build_depend and a exec_depend tag for "dynamic_reconfigure" -## * In this file (CMakeLists.txt): -## * add "dynamic_reconfigure" to -## find_package(catkin REQUIRED COMPONENTS ...) 
-## * uncomment the "generate_dynamic_reconfigure_options" section below -## and list every .cfg file to be processed - -## Generate dynamic reconfigure parameters in the 'cfg' folder -# generate_dynamic_reconfigure_options( -# cfg/DynReconf1.cfg -# cfg/DynReconf2.cfg -# ) - -################################### -## catkin specific configuration ## -################################### -## The catkin_package macro generates cmake config files for your package -## Declare things to be passed to dependent projects -## INCLUDE_DIRS: uncomment this if your package contains header files -## LIBRARIES: libraries you create in this project that dependent projects also need -## CATKIN_DEPENDS: catkin_packages dependent projects also need -## DEPENDS: system dependencies of this project that dependent projects also need -catkin_package( -# INCLUDE_DIRS include -# LIBRARIES common_math -# CATKIN_DEPENDS other_catkin_pkg -# DEPENDS system_lib -) - -########### -## Build ## -########### - -## Specify additional locations of header files -## Your package locations should be listed before other locations -include_directories( -# include -# ${catkin_INCLUDE_DIRS} -) - -## Declare a C++ library -# add_library(${PROJECT_NAME} -# src/${PROJECT_NAME}/common_math.cpp -# ) - -## Add cmake target dependencies of the library -## as an example, code may need to be generated before libraries -## either from message generation or dynamic reconfigure -# add_dependencies(${PROJECT_NAME} ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS}) - -## Declare a C++ executable -## With catkin_make all packages are built within a single CMake context -## The recommended prefix ensures that target names across packages don't collide -# add_executable(${PROJECT_NAME}_node src/common_math_node.cpp) - -## Rename C++ executable without prefix -## The above recommended prefix causes long target names, the following renames the -## target back to the shorter version for ease of user use -## e.g. "rosrun someones_pkg node" instead of "rosrun someones_pkg someones_pkg_node" -# set_target_properties(${PROJECT_NAME}_node PROPERTIES OUTPUT_NAME node PREFIX "") - -## Add cmake target dependencies of the executable -## same as for the library above -# add_dependencies(${PROJECT_NAME}_node ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS}) - -## Specify libraries to link a library or executable target against -# target_link_libraries(${PROJECT_NAME}_node -# ${catkin_LIBRARIES} -# ) - -############# -## Install ## -############# - -# all install targets should use catkin DESTINATION variables -# See http://ros.org/doc/api/catkin/html/adv_user_guide/variables.html - -## Mark executable scripts (Python etc.) 
for installation -## in contrast to setup.py, you can choose the destination -# catkin_install_python(PROGRAMS -# scripts/my_python_script -# DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} -# ) - -## Mark executables for installation -## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_executables.html -# install(TARGETS ${PROJECT_NAME}_node -# RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} -# ) - -## Mark libraries for installation -## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_libraries.html -# install(TARGETS ${PROJECT_NAME} -# ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} -# LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} -# RUNTIME DESTINATION ${CATKIN_GLOBAL_BIN_DESTINATION} -# ) - -## Mark cpp header files for installation -# install(DIRECTORY include/${PROJECT_NAME}/ -# DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION} -# FILES_MATCHING PATTERN "*.h" -# PATTERN ".svn" EXCLUDE -# ) - -## Mark other files for installation (e.g. launch and bag files, etc.) -# install(FILES -# # myfile1 -# # myfile2 -# DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION} -# ) - -############# -## Testing ## -############# - -## Add gtest based cpp test target and link libraries -# catkin_add_gtest(${PROJECT_NAME}-test test/test_common_math.cpp) -# if(TARGET ${PROJECT_NAME}-test) -# target_link_libraries(${PROJECT_NAME}-test ${PROJECT_NAME}) -# endif() - -## Add folders to be run by python nosetests -# catkin_add_nosetests(test) diff --git a/legacy/common_math/README.md b/legacy/common_math/README.md deleted file mode 100644 index 5d7e2598f..000000000 --- a/legacy/common_math/README.md +++ /dev/null @@ -1,75 +0,0 @@ -# common_math - -The common_math package - -This package is maintained by: -- [jared](mailto:jared@todo.todo) - -## Prerequisites - -This package depends on the following ROS packages: -- catkin (buildtool) -- actionlib (build) -- actionlib_msgs (build) -- geometry_msgs (build) -- message_generation (build) -- rospy (build) -- std_msgs (build) -- std_srvs (build) -- actionlib (exec) -- actionlib_msgs (exec) -- geometry_msgs (exec) -- rospy (exec) -- std_msgs (exec) -- std_srvs (exec) -- face_detection (exec) - -Ask the package maintainer to write or create a blank `doc/PREREQUISITES.md` for their package! - -## Usage - -Ask the package maintainer to write a `doc/USAGE.md` for their package! - -## Example - -Ask the package maintainer to write a `doc/EXAMPLE.md` for their package! - -## Technical Overview - -Ask the package maintainer to write a `doc/TECHNICAL.md` for their package! - -## ROS Definitions - -### Launch Files - -This package has no launch files. - -### Messages - -This package has no messages. - -### Services - -#### `TfTransform` - -Request - -| Field | Type | Description | -|:-:|:-:|---| -| pose_array | geometry_msgs/PoseArray | | -| pointcloud | sensor_msgs/PointCloud2 | | -| point | geometry_msgs/PointStamped | | -| target_frame | std_msgs/String | | - -Response - -| Field | Type | Description | -|:-:|:-:|---| -| target_pose_array | geometry_msgs/PoseArray | | -| target_pointcloud | sensor_msgs/PointCloud2 | | -| target_point | geometry_msgs/PointStamped | | - - -### Actions - -This package has no actions. 
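The legacy common_math package removed below provided tf_transform and bb_to_centroid helpers built around a TfTransform service; in this diff that role is taken over by cv2_pcl.seg_to_centroid plus a direct tf2 buffer transform (see lasr_vision_yolov8/yolo.py above). A minimal sketch of the tf2 pattern, assuming a centroid already expressed in the camera frame:

#!/usr/bin/env python3
# Sketch of the tf2 lookup that replaces the legacy tf_transform service call.
import rospy
import tf2_ros
import tf2_geometry_msgs  # noqa: registers PointStamped conversions for tf2
from geometry_msgs.msg import PointStamped

rospy.init_node("tf2_point_example")
tf_buffer = tf2_ros.Buffer(cache_time=rospy.Duration(10))
tf2_ros.TransformListener(tf_buffer)

point = PointStamped()
point.header.frame_id = "xtion_depth_optical_frame"  # assumed camera frame
point.header.stamp = rospy.Time(0)
point.point.x, point.point.y, point.point.z = 1.0, 0.0, 0.5  # example centroid

# Transform into the map frame, waiting up to one second for the transform
point_in_map = tf_buffer.transform(point, "map", rospy.Duration(1.0))
rospy.loginfo(f"Point in map frame: {point_in_map.point}")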
diff --git a/legacy/common_math/package.xml b/legacy/common_math/package.xml deleted file mode 100644 index c0c4b2acb..000000000 --- a/legacy/common_math/package.xml +++ /dev/null @@ -1,78 +0,0 @@ - - - common_math - 0.0.0 - The common_math package - - - - - jared - - - - - - TODO - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - catkin - actionlib - actionlib_msgs - geometry_msgs - message_generation - rospy - std_msgs - std_srvs - actionlib - actionlib_msgs - geometry_msgs - rospy - std_msgs - std_srvs - actionlib - actionlib_msgs - geometry_msgs - rospy - std_msgs - std_srvs - face_detection - - - - - - - diff --git a/legacy/common_math/src/common_math/__init__.py b/legacy/common_math/src/common_math/__init__.py deleted file mode 100644 index d06a271a9..000000000 --- a/legacy/common_math/src/common_math/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .math_ import * diff --git a/legacy/common_math/src/common_math/helpers/common_math_helpers.py b/legacy/common_math/src/common_math/helpers/common_math_helpers.py deleted file mode 100644 index 867ce5257..000000000 --- a/legacy/common_math/src/common_math/helpers/common_math_helpers.py +++ /dev/null @@ -1,28 +0,0 @@ -#!/usr/bin/env python3 - -import rospy -from geometry_msgs.msg import PoseWithCovarianceStamped -from tiago_controllers.helpers.pose_helpers import get_pose_from_param -from common_math.math_ import euclidian_distance - - - -def get_dist_to_door(is_robot, x=None, y=None): - if is_robot: - robot_pose = rospy.wait_for_message("/robot_pose", PoseWithCovarianceStamped) - print(f"robot pose: {robot_pose}") - r = (robot_pose.pose.pose.position.x, robot_pose.pose.pose.position.y) - else: - r = (x, y) - - door_position = get_pose_from_param("/door/pose") - print(f"door pose: {door_position}") - d = (door_position.position.x, door_position.position.y) - - dist = euclidian_distance(r, d) - print(f"distance to door: {dist}") - return dist - -# def get_how_close_to_door(is_robot, min_dist=0.5): -# dist = self.get_dist_to_door(is_robot) -# return round(dist, 1) < min_dist diff --git a/legacy/common_math/src/common_math/math.py b/legacy/common_math/src/common_math/math.py deleted file mode 100755 index 433458881..000000000 --- a/legacy/common_math/src/common_math/math.py +++ /dev/null @@ -1,121 +0,0 @@ -#!/usr/bin/python -import numpy as np -import rospy -import numpy as np -# import ros_numpy as rnp -from geometry_msgs.msg import PointStamped, Point -from std_msgs.msg import Header -from common_math.srv import TfTransform, TfTransformRequest - -def pcl_msg_to_cv2(pcl_msg): - """ - Constructs a cv2 image from a PointCloud2 message. - - Parameters - ---------- - pcl_msg : sensor_msgs/PointCloud2 - Input pointcloud (organised) - - Returns - ------- - np.array : cv2 image - """ - - # Extract rgb image from pointcloud - frame = np.fromstring(pcl_msg.data, dtype=np.uint8) - frame = frame.reshape(pcl_msg.height, pcl_msg.width, 32) - frame = frame[:,:,16:19] - - # Ensure array is contiguous - frame = np.ascontiguousarray(frame, dtype=np.uint8) - - return frame - - -def tf_transform(target_frame, pose_array=None, pointcloud=None, point=None): - """ - Transforms the given message to the target frame. 
- Parameters: - target_frame {frame_id} -- frame to transform to - pose_array {PoseArray} -- array of poses - pointcloud {PointCloud2} - point {PointStamped} - - Returns: - response {TfTransformResponse} -- target_pose_array {PoseArray}, target_pointcloud {PointCloud2}, target_point {PointStamped} - """ - - assert pose_array is not None or pointcloud is not None or point is not None - - rospy.wait_for_service('tf_transform', timeout=10) - try: - tf_transform_srv = rospy.ServiceProxy('tf_transform', TfTransform) - request = TfTransformRequest() - if pose_array is not None: - request.pose_array = pose_array - if pointcloud is not None: - request.pointcloud = pointcloud - if point is not None: - request.point = point - request.target_frame.data = target_frame - response = tf_transform_srv(request) - # print('transform done!') - return response - except rospy.ServiceException as e: - print("Service call failed: %s" % e) -def bb_to_centroid(pcl_msg, x, y, w, h): - """ - Computes a centroid in the map plane from a given bounding box. - Performs this by extracting the bounding box from the pointcloud, - and computing the centroid from that. - - Parameters - ---------- - pcl_msg : sensor_msgs/PointCloud2 - Input pointcloud (organised). - x : float - Bottom left x of bounding box. - y : float - Bottom left y of bounding box. - w : float - Width of bounding box. - h : float - Height of bounding box. - - Returns - ------- - geometry_msgs/PointStamped : centroid - """ - - # Convert xywh to bounding box coordinates. - x1, y1, x2, y2 = x, y, x + w, y + h - - # cv2 image - frame = pcl_msg_to_cv2(pcl_msg) - - # Compute mask from bounding box - mask = np.zeros(shape=frame.shape[:2]) - mask[y1:y2, x1:x2] = 255 # bounding box dimensions - - # Extract mask indices from bounding box - indices = np.argwhere(mask) - if len(indices) < 1: - return None - pcl_xyz = rnp.point_cloud2.pointcloud2_to_xyz_array(pcl_msg, remove_nans=False) - - # Create x, y, z array, for indexing. - xyz_points = [] - for x, y in indices: - x, y, z = pcl_xyz[x][y] - xyz_points.append([x, y, z]) - - # Get mean (centroid) point. - x, y, z = np.nanmean(xyz_points, axis=0) - - # Construct PointStamped message. - centroid_ = PointStamped(point=Point(x, y, z), header=Header(1, rospy.Time(0), "xtion_depth_optical_frame")) - - # Transform to map frame - centroid_ = tf_transform('map', point=centroid_).target_point - - return centroid_ \ No newline at end of file diff --git a/legacy/common_math/src/common_math/transformations.py b/legacy/common_math/src/common_math/transformations.py deleted file mode 100644 index 8872cbe84..000000000 --- a/legacy/common_math/src/common_math/transformations.py +++ /dev/null @@ -1,1705 +0,0 @@ -# -*- coding: utf-8 -*- -# transformations.py - -# Copyright (c) 2006, Christoph Gohlke -# Copyright (c) 2006-2009, The Regents of the University of California -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution. 
-# * Neither the name of the copyright holders nor the names of any -# contributors may be used to endorse or promote products derived -# from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE -# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -# POSSIBILITY OF SUCH DAMAGE. - -"""Homogeneous Transformation Matrices and Quaternions. - -A library for calculating 4x4 matrices for translating, rotating, reflecting, -scaling, shearing, projecting, orthogonalizing, and superimposing arrays of -3D homogeneous coordinates as well as for converting between rotation matrices, -Euler angles, and quaternions. Also includes an Arcball control object and -functions to decompose transformation matrices. - -:Authors: - `Christoph Gohlke `__, - Laboratory for Fluorescence Dynamics, University of California, Irvine - -:Version: 20090418 - -Requirements ------------- - -* `Python 2.6 `__ -* `Numpy 1.3 `__ -* `transformations.c 20090418 `__ - (optional implementation of some functions in C) - -Notes ------ - -Matrices (M) can be inverted using numpy.linalg.inv(M), concatenated using -numpy.dot(M0, M1), or used to transform homogeneous coordinates (v) using -numpy.dot(M, v) for shape (4, \*) "point of arrays", respectively -numpy.dot(v, M.T) for shape (\*, 4) "array of points". - -Calculations are carried out with numpy.float64 precision. - -This Python implementation is not optimized for speed. - -Vector, point, quaternion, and matrix function arguments are expected to be -"array like", i.e. tuple, list, or numpy arrays. - -Return types are numpy arrays unless specified otherwise. - -Angles are in radians unless specified otherwise. - -Quaternions ix+jy+kz+w are represented as [x, y, z, w]. - -Use the transpose of transformation matrices for OpenGL glMultMatrixd(). - -A triple of Euler angles can be applied/interpreted in 24 ways, which can -be specified using a 4 character string or encoded 4-tuple: - - *Axes 4-string*: e.g. 'sxyz' or 'ryxy' - - - first character : rotations are applied to 's'tatic or 'r'otating frame - - remaining characters : successive rotation axis 'x', 'y', or 'z' - - *Axes 4-tuple*: e.g. (0, 0, 0, 0) or (1, 1, 1, 1) - - - inner axis: code of axis ('x':0, 'y':1, 'z':2) of rightmost matrix. - - parity : even (0) if inner axis 'x' is followed by 'y', 'y' is followed - by 'z', or 'z' is followed by 'x'. Otherwise odd (1). - - repetition : first and last axis are same (1) or different (0). - - frame : rotations are applied to static (0) or rotating (1) frame. - -References ----------- - -(1) Matrices and transformations. Ronald Goldman. - In "Graphics Gems I", pp 472-475. Morgan Kaufmann, 1990. -(2) More matrices and transformations: shear and pseudo-perspective. - Ronald Goldman. In "Graphics Gems II", pp 320-323. Morgan Kaufmann, 1991. 
-(3) Decomposing a matrix into simple transformations. Spencer Thomas. - In "Graphics Gems II", pp 320-323. Morgan Kaufmann, 1991. -(4) Recovering the data from the transformation matrix. Ronald Goldman. - In "Graphics Gems II", pp 324-331. Morgan Kaufmann, 1991. -(5) Euler angle conversion. Ken Shoemake. - In "Graphics Gems IV", pp 222-229. Morgan Kaufmann, 1994. -(6) Arcball rotation control. Ken Shoemake. - In "Graphics Gems IV", pp 175-192. Morgan Kaufmann, 1994. -(7) Representing attitude: Euler angles, unit quaternions, and rotation - vectors. James Diebel. 2006. -(8) A discussion of the solution for the best rotation to relate two sets - of vectors. W Kabsch. Acta Cryst. 1978. A34, 827-828. -(9) Closed-form solution of absolute orientation using unit quaternions. - BKP Horn. J Opt Soc Am A. 1987. 4(4), 629-642. -(10) Quaternions. Ken Shoemake. - http://www.sfu.ca/~jwa3/cmpt461/files/quatut.pdf -(11) From quaternion to matrix and back. JMP van Waveren. 2005. - http://www.intel.com/cd/ids/developer/asmo-na/eng/293748.htm -(12) Uniform random rotations. Ken Shoemake. - In "Graphics Gems III", pp 124-132. Morgan Kaufmann, 1992. - - -Examples --------- - ->>> alpha, beta, gamma = 0.123, -1.234, 2.345 ->>> origin, xaxis, yaxis, zaxis = (0, 0, 0), (1, 0, 0), (0, 1, 0), (0, 0, 1) ->>> I = identity_matrix() ->>> Rx = rotation_matrix(alpha, xaxis) ->>> Ry = rotation_matrix(beta, yaxis) ->>> Rz = rotation_matrix(gamma, zaxis) ->>> R = concatenate_matrices(Rx, Ry, Rz) ->>> euler = euler_from_matrix(R, 'rxyz') ->>> numpy.allclose([alpha, beta, gamma], euler) -True ->>> Re = euler_matrix(alpha, beta, gamma, 'rxyz') ->>> is_same_transform(R, Re) -True ->>> al, be, ga = euler_from_matrix(Re, 'rxyz') ->>> is_same_transform(Re, euler_matrix(al, be, ga, 'rxyz')) -True ->>> qx = quaternion_about_axis(alpha, xaxis) ->>> qy = quaternion_about_axis(beta, yaxis) ->>> qz = quaternion_about_axis(gamma, zaxis) ->>> q = quaternion_multiply(qx, qy) ->>> q = quaternion_multiply(q, qz) ->>> Rq = quaternion_matrix(q) ->>> is_same_transform(R, Rq) -True ->>> S = scale_matrix(1.23, origin) ->>> T = translation_matrix((1, 2, 3)) ->>> Z = shear_matrix(beta, xaxis, origin, zaxis) ->>> R = random_rotation_matrix(numpy.random.rand(3)) ->>> M = concatenate_matrices(T, R, Z, S) ->>> scale, shear, angles, trans, persp = decompose_matrix(M) ->>> numpy.allclose(scale, 1.23) -True ->>> numpy.allclose(trans, (1, 2, 3)) -True ->>> numpy.allclose(shear, (0, math.tan(beta), 0)) -True ->>> is_same_transform(R, euler_matrix(axes='sxyz', *angles)) -True ->>> M1 = compose_matrix(scale, shear, angles, trans, persp) ->>> is_same_transform(M, M1) -True - -""" - -from __future__ import division - -import warnings -import math - -import numpy - -# Documentation in HTML format can be generated with Epydoc -__docformat__ = "restructuredtext en" - - -def identity_matrix(): - """Return 4x4 identity/unit matrix. - - >>> I = identity_matrix() - >>> numpy.allclose(I, numpy.dot(I, I)) - True - >>> numpy.sum(I), numpy.trace(I) - (4.0, 4.0) - >>> numpy.allclose(I, numpy.identity(4, dtype=numpy.float64)) - True - - """ - return numpy.identity(4, dtype=numpy.float64) - - -def translation_matrix(direction): - """Return matrix to translate by direction vector. - - >>> v = numpy.random.random(3) - 0.5 - >>> numpy.allclose(v, translation_matrix(v)[:3, 3]) - True - - """ - M = numpy.identity(4) - M[:3, 3] = direction[:3] - return M - - -def translation_from_matrix(matrix): - """Return translation vector from translation matrix. 
- - >>> v0 = numpy.random.random(3) - 0.5 - >>> v1 = translation_from_matrix(translation_matrix(v0)) - >>> numpy.allclose(v0, v1) - True - - """ - return numpy.array(matrix, copy=False)[:3, 3].copy() - - -def reflection_matrix(point, normal): - """Return matrix to mirror at plane defined by point and normal vector. - - >>> v0 = numpy.random.random(4) - 0.5 - >>> v0[3] = 1.0 - >>> v1 = numpy.random.random(3) - 0.5 - >>> R = reflection_matrix(v0, v1) - >>> numpy.allclose(2., numpy.trace(R)) - True - >>> numpy.allclose(v0, numpy.dot(R, v0)) - True - >>> v2 = v0.copy() - >>> v2[:3] += v1 - >>> v3 = v0.copy() - >>> v2[:3] -= v1 - >>> numpy.allclose(v2, numpy.dot(R, v3)) - True - - """ - normal = unit_vector(normal[:3]) - M = numpy.identity(4) - M[:3, :3] -= 2.0 * numpy.outer(normal, normal) - M[:3, 3] = (2.0 * numpy.dot(point[:3], normal)) * normal - return M - - -def reflection_from_matrix(matrix): - """Return mirror plane point and normal vector from reflection matrix. - - >>> v0 = numpy.random.random(3) - 0.5 - >>> v1 = numpy.random.random(3) - 0.5 - >>> M0 = reflection_matrix(v0, v1) - >>> point, normal = reflection_from_matrix(M0) - >>> M1 = reflection_matrix(point, normal) - >>> is_same_transform(M0, M1) - True - - """ - M = numpy.array(matrix, dtype=numpy.float64, copy=False) - # normal: unit eigenvector corresponding to eigenvalue -1 - l, V = numpy.linalg.eig(M[:3, :3]) - i = numpy.where(abs(numpy.real(l) + 1.0) < 1e-8)[0] - if not len(i): - raise ValueError("no unit eigenvector corresponding to eigenvalue -1") - normal = numpy.real(V[:, i[0]]).squeeze() - # point: any unit eigenvector corresponding to eigenvalue 1 - l, V = numpy.linalg.eig(M) - i = numpy.where(abs(numpy.real(l) - 1.0) < 1e-8)[0] - if not len(i): - raise ValueError("no unit eigenvector corresponding to eigenvalue 1") - point = numpy.real(V[:, i[-1]]).squeeze() - point /= point[3] - return point, normal - - -def rotation_matrix(angle, direction, point=None): - """Return matrix to rotate about axis defined by point and direction. - - >>> angle = (random.random() - 0.5) * (2*math.pi) - >>> direc = numpy.random.random(3) - 0.5 - >>> point = numpy.random.random(3) - 0.5 - >>> R0 = rotation_matrix(angle, direc, point) - >>> R1 = rotation_matrix(angle-2*math.pi, direc, point) - >>> is_same_transform(R0, R1) - True - >>> R0 = rotation_matrix(angle, direc, point) - >>> R1 = rotation_matrix(-angle, -direc, point) - >>> is_same_transform(R0, R1) - True - >>> I = numpy.identity(4, numpy.float64) - >>> numpy.allclose(I, rotation_matrix(math.pi*2, direc)) - True - >>> numpy.allclose(2., numpy.trace(rotation_matrix(math.pi/2, - ... direc, point))) - True - - """ - sina = math.sin(angle) - cosa = math.cos(angle) - direction = unit_vector(direction[:3]) - # rotation matrix around unit vector - R = numpy.array(((cosa, 0.0, 0.0), - (0.0, cosa, 0.0), - (0.0, 0.0, cosa)), dtype=numpy.float64) - R += numpy.outer(direction, direction) * (1.0 - cosa) - direction *= sina - R += numpy.array((( 0.0, -direction[2], direction[1]), - ( direction[2], 0.0, -direction[0]), - (-direction[1], direction[0], 0.0)), - dtype=numpy.float64) - M = numpy.identity(4) - M[:3, :3] = R - if point is not None: - # rotation not around origin - point = numpy.array(point[:3], dtype=numpy.float64, copy=False) - M[:3, 3] = point - numpy.dot(R, point) - return M - - -def rotation_from_matrix(matrix): - """Return rotation angle and axis from rotation matrix. 
- - >>> angle = (random.random() - 0.5) * (2*math.pi) - >>> direc = numpy.random.random(3) - 0.5 - >>> point = numpy.random.random(3) - 0.5 - >>> R0 = rotation_matrix(angle, direc, point) - >>> angle, direc, point = rotation_from_matrix(R0) - >>> R1 = rotation_matrix(angle, direc, point) - >>> is_same_transform(R0, R1) - True - - """ - R = numpy.array(matrix, dtype=numpy.float64, copy=False) - R33 = R[:3, :3] - # direction: unit eigenvector of R33 corresponding to eigenvalue of 1 - l, W = numpy.linalg.eig(R33.T) - i = numpy.where(abs(numpy.real(l) - 1.0) < 1e-8)[0] - if not len(i): - raise ValueError("no unit eigenvector corresponding to eigenvalue 1") - direction = numpy.real(W[:, i[-1]]).squeeze() - # point: unit eigenvector of R33 corresponding to eigenvalue of 1 - l, Q = numpy.linalg.eig(R) - i = numpy.where(abs(numpy.real(l) - 1.0) < 1e-8)[0] - if not len(i): - raise ValueError("no unit eigenvector corresponding to eigenvalue 1") - point = numpy.real(Q[:, i[-1]]).squeeze() - point /= point[3] - # rotation angle depending on direction - cosa = (numpy.trace(R33) - 1.0) / 2.0 - if abs(direction[2]) > 1e-8: - sina = (R[1, 0] + (cosa-1.0)*direction[0]*direction[1]) / direction[2] - elif abs(direction[1]) > 1e-8: - sina = (R[0, 2] + (cosa-1.0)*direction[0]*direction[2]) / direction[1] - else: - sina = (R[2, 1] + (cosa-1.0)*direction[1]*direction[2]) / direction[0] - angle = math.atan2(sina, cosa) - return angle, direction, point - - -def scale_matrix(factor, origin=None, direction=None): - """Return matrix to scale by factor around origin in direction. - - Use factor -1 for point symmetry. - - >>> v = (numpy.random.rand(4, 5) - 0.5) * 20.0 - >>> v[3] = 1.0 - >>> S = scale_matrix(-1.234) - >>> numpy.allclose(numpy.dot(S, v)[:3], -1.234*v[:3]) - True - >>> factor = random.random() * 10 - 5 - >>> origin = numpy.random.random(3) - 0.5 - >>> direct = numpy.random.random(3) - 0.5 - >>> S = scale_matrix(factor, origin) - >>> S = scale_matrix(factor, origin, direct) - - """ - if direction is None: - # uniform scaling - M = numpy.array(((factor, 0.0, 0.0, 0.0), - (0.0, factor, 0.0, 0.0), - (0.0, 0.0, factor, 0.0), - (0.0, 0.0, 0.0, 1.0)), dtype=numpy.float64) - if origin is not None: - M[:3, 3] = origin[:3] - M[:3, 3] *= 1.0 - factor - else: - # nonuniform scaling - direction = unit_vector(direction[:3]) - factor = 1.0 - factor - M = numpy.identity(4) - M[:3, :3] -= factor * numpy.outer(direction, direction) - if origin is not None: - M[:3, 3] = (factor * numpy.dot(origin[:3], direction)) * direction - return M - - -def scale_from_matrix(matrix): - """Return scaling factor, origin and direction from scaling matrix. 
- - >>> factor = random.random() * 10 - 5 - >>> origin = numpy.random.random(3) - 0.5 - >>> direct = numpy.random.random(3) - 0.5 - >>> S0 = scale_matrix(factor, origin) - >>> factor, origin, direction = scale_from_matrix(S0) - >>> S1 = scale_matrix(factor, origin, direction) - >>> is_same_transform(S0, S1) - True - >>> S0 = scale_matrix(factor, origin, direct) - >>> factor, origin, direction = scale_from_matrix(S0) - >>> S1 = scale_matrix(factor, origin, direction) - >>> is_same_transform(S0, S1) - True - - """ - M = numpy.array(matrix, dtype=numpy.float64, copy=False) - M33 = M[:3, :3] - factor = numpy.trace(M33) - 2.0 - try: - # direction: unit eigenvector corresponding to eigenvalue factor - l, V = numpy.linalg.eig(M33) - i = numpy.where(abs(numpy.real(l) - factor) < 1e-8)[0][0] - direction = numpy.real(V[:, i]).squeeze() - direction /= vector_norm(direction) - except IndexError: - # uniform scaling - factor = (factor + 2.0) / 3.0 - direction = None - # origin: any eigenvector corresponding to eigenvalue 1 - l, V = numpy.linalg.eig(M) - i = numpy.where(abs(numpy.real(l) - 1.0) < 1e-8)[0] - if not len(i): - raise ValueError("no eigenvector corresponding to eigenvalue 1") - origin = numpy.real(V[:, i[-1]]).squeeze() - origin /= origin[3] - return factor, origin, direction - - -def projection_matrix(point, normal, direction=None, - perspective=None, pseudo=False): - """Return matrix to project onto plane defined by point and normal. - - Using either perspective point, projection direction, or none of both. - - If pseudo is True, perspective projections will preserve relative depth - such that Perspective = dot(Orthogonal, PseudoPerspective). - - >>> P = projection_matrix((0, 0, 0), (1, 0, 0)) - >>> numpy.allclose(P[1:, 1:], numpy.identity(4)[1:, 1:]) - True - >>> point = numpy.random.random(3) - 0.5 - >>> normal = numpy.random.random(3) - 0.5 - >>> direct = numpy.random.random(3) - 0.5 - >>> persp = numpy.random.random(3) - 0.5 - >>> P0 = projection_matrix(point, normal) - >>> P1 = projection_matrix(point, normal, direction=direct) - >>> P2 = projection_matrix(point, normal, perspective=persp) - >>> P3 = projection_matrix(point, normal, perspective=persp, pseudo=True) - >>> is_same_transform(P2, numpy.dot(P0, P3)) - True - >>> P = projection_matrix((3, 0, 0), (1, 1, 0), (1, 0, 0)) - >>> v0 = (numpy.random.rand(4, 5) - 0.5) * 20.0 - >>> v0[3] = 1.0 - >>> v1 = numpy.dot(P, v0) - >>> numpy.allclose(v1[1], v0[1]) - True - >>> numpy.allclose(v1[0], 3.0-v1[1]) - True - - """ - M = numpy.identity(4) - point = numpy.array(point[:3], dtype=numpy.float64, copy=False) - normal = unit_vector(normal[:3]) - if perspective is not None: - # perspective projection - perspective = numpy.array(perspective[:3], dtype=numpy.float64, - copy=False) - M[0, 0] = M[1, 1] = M[2, 2] = numpy.dot(perspective-point, normal) - M[:3, :3] -= numpy.outer(perspective, normal) - if pseudo: - # preserve relative depth - M[:3, :3] -= numpy.outer(normal, normal) - M[:3, 3] = numpy.dot(point, normal) * (perspective+normal) - else: - M[:3, 3] = numpy.dot(point, normal) * perspective - M[3, :3] = -normal - M[3, 3] = numpy.dot(perspective, normal) - elif direction is not None: - # parallel projection - direction = numpy.array(direction[:3], dtype=numpy.float64, copy=False) - scale = numpy.dot(direction, normal) - M[:3, :3] -= numpy.outer(direction, normal) / scale - M[:3, 3] = direction * (numpy.dot(point, normal) / scale) - else: - # orthogonal projection - M[:3, :3] -= numpy.outer(normal, normal) - M[:3, 3] = numpy.dot(point, 
normal) * normal - return M - - -def projection_from_matrix(matrix, pseudo=False): - """Return projection plane and perspective point from projection matrix. - - Return values are same as arguments for projection_matrix function: - point, normal, direction, perspective, and pseudo. - - >>> point = numpy.random.random(3) - 0.5 - >>> normal = numpy.random.random(3) - 0.5 - >>> direct = numpy.random.random(3) - 0.5 - >>> persp = numpy.random.random(3) - 0.5 - >>> P0 = projection_matrix(point, normal) - >>> result = projection_from_matrix(P0) - >>> P1 = projection_matrix(*result) - >>> is_same_transform(P0, P1) - True - >>> P0 = projection_matrix(point, normal, direct) - >>> result = projection_from_matrix(P0) - >>> P1 = projection_matrix(*result) - >>> is_same_transform(P0, P1) - True - >>> P0 = projection_matrix(point, normal, perspective=persp, pseudo=False) - >>> result = projection_from_matrix(P0, pseudo=False) - >>> P1 = projection_matrix(*result) - >>> is_same_transform(P0, P1) - True - >>> P0 = projection_matrix(point, normal, perspective=persp, pseudo=True) - >>> result = projection_from_matrix(P0, pseudo=True) - >>> P1 = projection_matrix(*result) - >>> is_same_transform(P0, P1) - True - - """ - M = numpy.array(matrix, dtype=numpy.float64, copy=False) - M33 = M[:3, :3] - l, V = numpy.linalg.eig(M) - i = numpy.where(abs(numpy.real(l) - 1.0) < 1e-8)[0] - if not pseudo and len(i): - # point: any eigenvector corresponding to eigenvalue 1 - point = numpy.real(V[:, i[-1]]).squeeze() - point /= point[3] - # direction: unit eigenvector corresponding to eigenvalue 0 - l, V = numpy.linalg.eig(M33) - i = numpy.where(abs(numpy.real(l)) < 1e-8)[0] - if not len(i): - raise ValueError("no eigenvector corresponding to eigenvalue 0") - direction = numpy.real(V[:, i[0]]).squeeze() - direction /= vector_norm(direction) - # normal: unit eigenvector of M33.T corresponding to eigenvalue 0 - l, V = numpy.linalg.eig(M33.T) - i = numpy.where(abs(numpy.real(l)) < 1e-8)[0] - if len(i): - # parallel projection - normal = numpy.real(V[:, i[0]]).squeeze() - normal /= vector_norm(normal) - return point, normal, direction, None, False - else: - # orthogonal projection, where normal equals direction vector - return point, direction, None, None, False - else: - # perspective projection - i = numpy.where(abs(numpy.real(l)) > 1e-8)[0] - if not len(i): - raise ValueError( - "no eigenvector not corresponding to eigenvalue 0") - point = numpy.real(V[:, i[-1]]).squeeze() - point /= point[3] - normal = - M[3, :3] - perspective = M[:3, 3] / numpy.dot(point[:3], normal) - if pseudo: - perspective -= normal - return point, normal, None, perspective, pseudo - - -def clip_matrix(left, right, bottom, top, near, far, perspective=False): - """Return matrix to obtain normalized device coordinates from frustrum. - - The frustrum bounds are axis-aligned along x (left, right), - y (bottom, top) and z (near, far). - - Normalized device coordinates are in range [-1, 1] if coordinates are - inside the frustrum. - - If perspective is True the frustrum is a truncated pyramid with the - perspective point at origin and direction along z axis, otherwise an - orthographic canonical view volume (a box). - - Homogeneous coordinates transformed by the perspective clip matrix - need to be dehomogenized (devided by w coordinate). 
- - >>> frustrum = numpy.random.rand(6) - >>> frustrum[1] += frustrum[0] - >>> frustrum[3] += frustrum[2] - >>> frustrum[5] += frustrum[4] - >>> M = clip_matrix(*frustrum, perspective=False) - >>> numpy.dot(M, [frustrum[0], frustrum[2], frustrum[4], 1.0]) - array([-1., -1., -1., 1.]) - >>> numpy.dot(M, [frustrum[1], frustrum[3], frustrum[5], 1.0]) - array([ 1., 1., 1., 1.]) - >>> M = clip_matrix(*frustrum, perspective=True) - >>> v = numpy.dot(M, [frustrum[0], frustrum[2], frustrum[4], 1.0]) - >>> v / v[3] - array([-1., -1., -1., 1.]) - >>> v = numpy.dot(M, [frustrum[1], frustrum[3], frustrum[4], 1.0]) - >>> v / v[3] - array([ 1., 1., -1., 1.]) - - """ - if left >= right or bottom >= top or near >= far: - raise ValueError("invalid frustrum") - if perspective: - if near <= _EPS: - raise ValueError("invalid frustrum: near <= 0") - t = 2.0 * near - M = ((-t/(right-left), 0.0, (right+left)/(right-left), 0.0), - (0.0, -t/(top-bottom), (top+bottom)/(top-bottom), 0.0), - (0.0, 0.0, -(far+near)/(far-near), t*far/(far-near)), - (0.0, 0.0, -1.0, 0.0)) - else: - M = ((2.0/(right-left), 0.0, 0.0, (right+left)/(left-right)), - (0.0, 2.0/(top-bottom), 0.0, (top+bottom)/(bottom-top)), - (0.0, 0.0, 2.0/(far-near), (far+near)/(near-far)), - (0.0, 0.0, 0.0, 1.0)) - return numpy.array(M, dtype=numpy.float64) - - -def shear_matrix(angle, direction, point, normal): - """Return matrix to shear by angle along direction vector on shear plane. - - The shear plane is defined by a point and normal vector. The direction - vector must be orthogonal to the plane's normal vector. - - A point P is transformed by the shear matrix into P" such that - the vector P-P" is parallel to the direction vector and its extent is - given by the angle of P-P'-P", where P' is the orthogonal projection - of P onto the shear plane. - - >>> angle = (random.random() - 0.5) * 4*math.pi - >>> direct = numpy.random.random(3) - 0.5 - >>> point = numpy.random.random(3) - 0.5 - >>> normal = numpy.cross(direct, numpy.random.random(3)) - >>> S = shear_matrix(angle, direct, point, normal) - >>> numpy.allclose(1.0, numpy.linalg.det(S)) - True - - """ - normal = unit_vector(normal[:3]) - direction = unit_vector(direction[:3]) - if abs(numpy.dot(normal, direction)) > 1e-6: - raise ValueError("direction and normal vectors are not orthogonal") - angle = math.tan(angle) - M = numpy.identity(4) - M[:3, :3] += angle * numpy.outer(direction, normal) - M[:3, 3] = -angle * numpy.dot(point[:3], normal) * direction - return M - - -def shear_from_matrix(matrix): - """Return shear angle, direction and plane from shear matrix. 
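(Editorial aside, not part of the removed file.) The geometric description of shear_matrix above is easiest to see in an axis-aligned case: a shear along +x over the z = 0 plane displaces each point by tan(angle) * z in x. A minimal sketch, assuming the helpers shown above are importable under a hypothetical module name `transformations`:

```python
# Illustrative sketch only; `transformations` is a hypothetical module name
# standing in for the file these functions are being removed from.
import math
import numpy
from transformations import shear_matrix

angle = math.radians(30)
# Shear along +x over the z = 0 plane; direction is orthogonal to the normal.
S = shear_matrix(angle, (1, 0, 0), (0, 0, 0), (0, 0, 1))
p = numpy.array([1.0, 2.0, 3.0, 1.0])
# x gains tan(angle) * z, while y and z are unchanged.
assert numpy.allclose(numpy.dot(S, p),
                      [1.0 + math.tan(angle) * 3.0, 2.0, 3.0, 1.0])
```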
- - >>> angle = (random.random() - 0.5) * 4*math.pi - >>> direct = numpy.random.random(3) - 0.5 - >>> point = numpy.random.random(3) - 0.5 - >>> normal = numpy.cross(direct, numpy.random.random(3)) - >>> S0 = shear_matrix(angle, direct, point, normal) - >>> angle, direct, point, normal = shear_from_matrix(S0) - >>> S1 = shear_matrix(angle, direct, point, normal) - >>> is_same_transform(S0, S1) - True - - """ - M = numpy.array(matrix, dtype=numpy.float64, copy=False) - M33 = M[:3, :3] - # normal: cross independent eigenvectors corresponding to the eigenvalue 1 - l, V = numpy.linalg.eig(M33) - i = numpy.where(abs(numpy.real(l) - 1.0) < 1e-4)[0] - if len(i) < 2: - raise ValueError("No two linear independent eigenvectors found {}".format(l)) - V = numpy.real(V[:, i]).squeeze().T - lenorm = -1.0 - for i0, i1 in ((0, 1), (0, 2), (1, 2)): - n = numpy.cross(V[i0], V[i1]) - l = vector_norm(n) - if l > lenorm: - lenorm = l - normal = n - normal /= lenorm - # direction and angle - direction = numpy.dot(M33 - numpy.identity(3), normal) - angle = vector_norm(direction) - direction /= angle - angle = math.atan(angle) - # point: eigenvector corresponding to eigenvalue 1 - l, V = numpy.linalg.eig(M) - i = numpy.where(abs(numpy.real(l) - 1.0) < 1e-8)[0] - if not len(i): - raise ValueError("no eigenvector corresponding to eigenvalue 1") - point = numpy.real(V[:, i[-1]]).squeeze() - point /= point[3] - return angle, direction, point, normal - - -def decompose_matrix(matrix): - """Return sequence of transformations from transformation matrix. - - matrix : array_like - Non-degenerative homogeneous transformation matrix - - Return tuple of: - scale : vector of 3 scaling factors - shear : list of shear factors for x-y, x-z, y-z axes - angles : list of Euler angles about static x, y, z axes - translate : translation vector along x, y, z axes - perspective : perspective partition of matrix - - Raise ValueError if matrix is of wrong type or degenerative. 
- - >>> T0 = translation_matrix((1, 2, 3)) - >>> scale, shear, angles, trans, persp = decompose_matrix(T0) - >>> T1 = translation_matrix(trans) - >>> numpy.allclose(T0, T1) - True - >>> S = scale_matrix(0.123) - >>> scale, shear, angles, trans, persp = decompose_matrix(S) - >>> scale[0] - 0.123 - >>> R0 = euler_matrix(1, 2, 3) - >>> scale, shear, angles, trans, persp = decompose_matrix(R0) - >>> R1 = euler_matrix(*angles) - >>> numpy.allclose(R0, R1) - True - - """ - M = numpy.array(matrix, dtype=numpy.float64, copy=True).T - if abs(M[3, 3]) < _EPS: - raise ValueError("M[3, 3] is zero") - M /= M[3, 3] - P = M.copy() - P[:, 3] = 0, 0, 0, 1 - if not numpy.linalg.det(P): - raise ValueError("Matrix is singular") - - scale = numpy.zeros((3, ), dtype=numpy.float64) - shear = [0, 0, 0] - angles = [0, 0, 0] - - if any(abs(M[:3, 3]) > _EPS): - perspective = numpy.dot(M[:, 3], numpy.linalg.inv(P.T)) - M[:, 3] = 0, 0, 0, 1 - else: - perspective = numpy.array((0, 0, 0, 1), dtype=numpy.float64) - - translate = M[3, :3].copy() - M[3, :3] = 0 - - row = M[:3, :3].copy() - scale[0] = vector_norm(row[0]) - row[0] /= scale[0] - shear[0] = numpy.dot(row[0], row[1]) - row[1] -= row[0] * shear[0] - scale[1] = vector_norm(row[1]) - row[1] /= scale[1] - shear[0] /= scale[1] - shear[1] = numpy.dot(row[0], row[2]) - row[2] -= row[0] * shear[1] - shear[2] = numpy.dot(row[1], row[2]) - row[2] -= row[1] * shear[2] - scale[2] = vector_norm(row[2]) - row[2] /= scale[2] - shear[1:] /= scale[2] - - if numpy.dot(row[0], numpy.cross(row[1], row[2])) < 0: - scale *= -1 - row *= -1 - - angles[1] = math.asin(-row[0, 2]) - if math.cos(angles[1]): - angles[0] = math.atan2(row[1, 2], row[2, 2]) - angles[2] = math.atan2(row[0, 1], row[0, 0]) - else: - #angles[0] = math.atan2(row[1, 0], row[1, 1]) - angles[0] = math.atan2(-row[2, 1], row[1, 1]) - angles[2] = 0.0 - - return scale, shear, angles, translate, perspective - - -def compose_matrix(scale=None, shear=None, angles=None, translate=None, - perspective=None): - """Return transformation matrix from sequence of transformations. - - This is the inverse of the decompose_matrix function. 
- - Sequence of transformations: - scale : vector of 3 scaling factors - shear : list of shear factors for x-y, x-z, y-z axes - angles : list of Euler angles about static x, y, z axes - translate : translation vector along x, y, z axes - perspective : perspective partition of matrix - - >>> scale = numpy.random.random(3) - 0.5 - >>> shear = numpy.random.random(3) - 0.5 - >>> angles = (numpy.random.random(3) - 0.5) * (2*math.pi) - >>> trans = numpy.random.random(3) - 0.5 - >>> persp = numpy.random.random(4) - 0.5 - >>> M0 = compose_matrix(scale, shear, angles, trans, persp) - >>> result = decompose_matrix(M0) - >>> M1 = compose_matrix(*result) - >>> is_same_transform(M0, M1) - True - - """ - M = numpy.identity(4) - if perspective is not None: - P = numpy.identity(4) - P[3, :] = perspective[:4] - M = numpy.dot(M, P) - if translate is not None: - T = numpy.identity(4) - T[:3, 3] = translate[:3] - M = numpy.dot(M, T) - if angles is not None: - R = euler_matrix(angles[0], angles[1], angles[2], 'sxyz') - M = numpy.dot(M, R) - if shear is not None: - Z = numpy.identity(4) - Z[1, 2] = shear[2] - Z[0, 2] = shear[1] - Z[0, 1] = shear[0] - M = numpy.dot(M, Z) - if scale is not None: - S = numpy.identity(4) - S[0, 0] = scale[0] - S[1, 1] = scale[1] - S[2, 2] = scale[2] - M = numpy.dot(M, S) - M /= M[3, 3] - return M - - -def orthogonalization_matrix(lengths, angles): - """Return orthogonalization matrix for crystallographic cell coordinates. - - Angles are expected in degrees. - - The de-orthogonalization matrix is the inverse. - - >>> O = orthogonalization_matrix((10., 10., 10.), (90., 90., 90.)) - >>> numpy.allclose(O[:3, :3], numpy.identity(3, float) * 10) - True - >>> O = orthogonalization_matrix([9.8, 12.0, 15.5], [87.2, 80.7, 69.7]) - >>> numpy.allclose(numpy.sum(O), 43.063229) - True - - """ - a, b, c = lengths - angles = numpy.radians(angles) - sina, sinb, _ = numpy.sin(angles) - cosa, cosb, cosg = numpy.cos(angles) - co = (cosa * cosb - cosg) / (sina * sinb) - return numpy.array(( - ( a*sinb*math.sqrt(1.0-co*co), 0.0, 0.0, 0.0), - (-a*sinb*co, b*sina, 0.0, 0.0), - ( a*cosb, b*cosa, c, 0.0), - ( 0.0, 0.0, 0.0, 1.0)), - dtype=numpy.float64) - - -def superimposition_matrix(v0, v1, scaling=False, usesvd=True): - """Return matrix to transform given vector set into second vector set. - - v0 and v1 are shape (3, \*) or (4, \*) arrays of at least 3 vectors. - - If usesvd is True, the weighted sum of squared deviations (RMSD) is - minimized according to the algorithm by W. Kabsch [8]. Otherwise the - quaternion based algorithm by B. Horn [9] is used (slower when using - this Python implementation). - - The returned matrix performs rotation, translation and uniform scaling - (if specified). 
- - >>> v0 = numpy.random.rand(3, 10) - >>> M = superimposition_matrix(v0, v0) - >>> numpy.allclose(M, numpy.identity(4)) - True - >>> R = random_rotation_matrix(numpy.random.random(3)) - >>> v0 = ((1,0,0), (0,1,0), (0,0,1), (1,1,1)) - >>> v1 = numpy.dot(R, v0) - >>> M = superimposition_matrix(v0, v1) - >>> numpy.allclose(v1, numpy.dot(M, v0)) - True - >>> v0 = (numpy.random.rand(4, 100) - 0.5) * 20.0 - >>> v0[3] = 1.0 - >>> v1 = numpy.dot(R, v0) - >>> M = superimposition_matrix(v0, v1) - >>> numpy.allclose(v1, numpy.dot(M, v0)) - True - >>> S = scale_matrix(random.random()) - >>> T = translation_matrix(numpy.random.random(3)-0.5) - >>> M = concatenate_matrices(T, R, S) - >>> v1 = numpy.dot(M, v0) - >>> v0[:3] += numpy.random.normal(0.0, 1e-9, 300).reshape(3, -1) - >>> M = superimposition_matrix(v0, v1, scaling=True) - >>> numpy.allclose(v1, numpy.dot(M, v0)) - True - >>> M = superimposition_matrix(v0, v1, scaling=True, usesvd=False) - >>> numpy.allclose(v1, numpy.dot(M, v0)) - True - >>> v = numpy.empty((4, 100, 3), dtype=numpy.float64) - >>> v[:, :, 0] = v0 - >>> M = superimposition_matrix(v0, v1, scaling=True, usesvd=False) - >>> numpy.allclose(v1, numpy.dot(M, v[:, :, 0])) - True - - """ - v0 = numpy.array(v0, dtype=numpy.float64, copy=False)[:3] - v1 = numpy.array(v1, dtype=numpy.float64, copy=False)[:3] - - if v0.shape != v1.shape or v0.shape[1] < 3: - raise ValueError("Vector sets are of wrong shape or type.") - - # move centroids to origin - t0 = numpy.mean(v0, axis=1) - t1 = numpy.mean(v1, axis=1) - v0 = v0 - t0.reshape(3, 1) - v1 = v1 - t1.reshape(3, 1) - - if usesvd: - # Singular Value Decomposition of covariance matrix - u, s, vh = numpy.linalg.svd(numpy.dot(v1, v0.T)) - # rotation matrix from SVD orthonormal bases - R = numpy.dot(u, vh) - if numpy.linalg.det(R) < 0.0: - # R does not constitute right handed system - R -= numpy.outer(u[:, 2], vh[2, :]*2.0) - s[-1] *= -1.0 - # homogeneous transformation matrix - M = numpy.identity(4) - M[:3, :3] = R - else: - # compute symmetric matrix N - xx, yy, zz = numpy.sum(v0 * v1, axis=1) - xy, yz, zx = numpy.sum(v0 * numpy.roll(v1, -1, axis=0), axis=1) - xz, yx, zy = numpy.sum(v0 * numpy.roll(v1, -2, axis=0), axis=1) - N = ((xx+yy+zz, yz-zy, zx-xz, xy-yx), - (yz-zy, xx-yy-zz, xy+yx, zx+xz), - (zx-xz, xy+yx, -xx+yy-zz, yz+zy), - (xy-yx, zx+xz, yz+zy, -xx-yy+zz)) - # quaternion: eigenvector corresponding to most positive eigenvalue - l, V = numpy.linalg.eig(N) - q = V[:, numpy.argmax(l)] - q /= vector_norm(q) # unit quaternion - q = numpy.roll(q, -1) # move w component to end - # homogeneous transformation matrix - M = quaternion_matrix(q) - - # scale: ratio of rms deviations from centroid - if scaling: - v0 *= v0 - v1 *= v1 - M[:3, :3] *= math.sqrt(numpy.sum(v1) / numpy.sum(v0)) - - # translation - M[:3, 3] = t1 - T = numpy.identity(4) - T[:3, 3] = -t0 - M = numpy.dot(M, T) - return M - - -def euler_matrix(ai, aj, ak, axes='sxyz'): - """Return homogeneous rotation matrix from Euler angles and axis sequence. - - ai, aj, ak : Euler's roll, pitch and yaw angles - axes : One of 24 axis sequences as string or encoded tuple - - >>> R = euler_matrix(1, 2, 3, 'syxz') - >>> numpy.allclose(numpy.sum(R[0]), -1.34786452) - True - >>> R = euler_matrix(1, 2, 3, (0, 1, 0, 1)) - >>> numpy.allclose(numpy.sum(R[0]), -0.383436184) - True - >>> ai, aj, ak = (4.0*math.pi) * (numpy.random.random(3) - 0.5) - >>> for axes in _AXES2TUPLE.keys(): - ... R = euler_matrix(ai, aj, ak, axes) - >>> for axes in _TUPLE2AXES.keys(): - ... 
R = euler_matrix(ai, aj, ak, axes) - - """ - try: - firstaxis, parity, repetition, frame = _AXES2TUPLE[axes] - except (AttributeError, KeyError): - _ = _TUPLE2AXES[axes] - firstaxis, parity, repetition, frame = axes - - i = firstaxis - j = _NEXT_AXIS[i+parity] - k = _NEXT_AXIS[i-parity+1] - - if frame: - ai, ak = ak, ai - if parity: - ai, aj, ak = -ai, -aj, -ak - - si, sj, sk = math.sin(ai), math.sin(aj), math.sin(ak) - ci, cj, ck = math.cos(ai), math.cos(aj), math.cos(ak) - cc, cs = ci*ck, ci*sk - sc, ss = si*ck, si*sk - - M = numpy.identity(4) - if repetition: - M[i, i] = cj - M[i, j] = sj*si - M[i, k] = sj*ci - M[j, i] = sj*sk - M[j, j] = -cj*ss+cc - M[j, k] = -cj*cs-sc - M[k, i] = -sj*ck - M[k, j] = cj*sc+cs - M[k, k] = cj*cc-ss - else: - M[i, i] = cj*ck - M[i, j] = sj*sc-cs - M[i, k] = sj*cc+ss - M[j, i] = cj*sk - M[j, j] = sj*ss+cc - M[j, k] = sj*cs-sc - M[k, i] = -sj - M[k, j] = cj*si - M[k, k] = cj*ci - return M - - -def euler_from_matrix(matrix, axes='sxyz'): - """Return Euler angles from rotation matrix for specified axis sequence. - - axes : One of 24 axis sequences as string or encoded tuple - - Note that many Euler angle triplets can describe one matrix. - - >>> R0 = euler_matrix(1, 2, 3, 'syxz') - >>> al, be, ga = euler_from_matrix(R0, 'syxz') - >>> R1 = euler_matrix(al, be, ga, 'syxz') - >>> numpy.allclose(R0, R1) - True - >>> angles = (4.0*math.pi) * (numpy.random.random(3) - 0.5) - >>> for axes in _AXES2TUPLE.keys(): - ... R0 = euler_matrix(axes=axes, *angles) - ... R1 = euler_matrix(axes=axes, *euler_from_matrix(R0, axes)) - ... if not numpy.allclose(R0, R1): print axes, "failed" - - """ - try: - firstaxis, parity, repetition, frame = _AXES2TUPLE[axes.lower()] - except (AttributeError, KeyError): - _ = _TUPLE2AXES[axes] - firstaxis, parity, repetition, frame = axes - - i = firstaxis - j = _NEXT_AXIS[i+parity] - k = _NEXT_AXIS[i-parity+1] - - M = numpy.array(matrix, dtype=numpy.float64, copy=False)[:3, :3] - if repetition: - sy = math.sqrt(M[i, j]*M[i, j] + M[i, k]*M[i, k]) - if sy > _EPS: - ax = math.atan2( M[i, j], M[i, k]) - ay = math.atan2( sy, M[i, i]) - az = math.atan2( M[j, i], -M[k, i]) - else: - ax = math.atan2(-M[j, k], M[j, j]) - ay = math.atan2( sy, M[i, i]) - az = 0.0 - else: - cy = math.sqrt(M[i, i]*M[i, i] + M[j, i]*M[j, i]) - if cy > _EPS: - ax = math.atan2( M[k, j], M[k, k]) - ay = math.atan2(-M[k, i], cy) - az = math.atan2( M[j, i], M[i, i]) - else: - ax = math.atan2(-M[j, k], M[j, j]) - ay = math.atan2(-M[k, i], cy) - az = 0.0 - - if parity: - ax, ay, az = -ax, -ay, -az - if frame: - ax, az = az, ax - return ax, ay, az - - -def euler_from_quaternion(quaternion, axes='sxyz'): - """Return Euler angles from quaternion for specified axis sequence. - - >>> angles = euler_from_quaternion([0.06146124, 0, 0, 0.99810947]) - >>> numpy.allclose(angles, [0.123, 0, 0]) - True - - """ - return euler_from_matrix(quaternion_matrix(quaternion), axes) - - -def quaternion_from_euler(ai, aj, ak, axes='sxyz'): - """Return quaternion from Euler angles and axis sequence. 
- - ai, aj, ak : Euler's roll, pitch and yaw angles - axes : One of 24 axis sequences as string or encoded tuple - - >>> q = quaternion_from_euler(1, 2, 3, 'ryxz') - >>> numpy.allclose(q, [0.310622, -0.718287, 0.444435, 0.435953]) - True - - """ - try: - firstaxis, parity, repetition, frame = _AXES2TUPLE[axes.lower()] - except (AttributeError, KeyError): - _ = _TUPLE2AXES[axes] - firstaxis, parity, repetition, frame = axes - - i = firstaxis - j = _NEXT_AXIS[i+parity] - k = _NEXT_AXIS[i-parity+1] - - if frame: - ai, ak = ak, ai - if parity: - aj = -aj - - ai /= 2.0 - aj /= 2.0 - ak /= 2.0 - ci = math.cos(ai) - si = math.sin(ai) - cj = math.cos(aj) - sj = math.sin(aj) - ck = math.cos(ak) - sk = math.sin(ak) - cc = ci*ck - cs = ci*sk - sc = si*ck - ss = si*sk - - quaternion = numpy.empty((4, ), dtype=numpy.float64) - if repetition: - quaternion[i] = cj*(cs + sc) - quaternion[j] = sj*(cc + ss) - quaternion[k] = sj*(cs - sc) - quaternion[3] = cj*(cc - ss) - else: - quaternion[i] = cj*sc - sj*cs - quaternion[j] = cj*ss + sj*cc - quaternion[k] = cj*cs - sj*sc - quaternion[3] = cj*cc + sj*ss - if parity: - quaternion[j] *= -1 - - return quaternion - - -def quaternion_about_axis(angle, axis): - """Return quaternion for rotation about axis. - - >>> q = quaternion_about_axis(0.123, (1, 0, 0)) - >>> numpy.allclose(q, [0.06146124, 0, 0, 0.99810947]) - True - - """ - quaternion = numpy.zeros((4, ), dtype=numpy.float64) - quaternion[:3] = axis[:3] - qlen = vector_norm(quaternion) - if qlen > _EPS: - quaternion *= math.sin(angle/2.0) / qlen - quaternion[3] = math.cos(angle/2.0) - return quaternion - - -def quaternion_matrix(quaternion): - """Return homogeneous rotation matrix from quaternion. - - >>> R = quaternion_matrix([0.06146124, 0, 0, 0.99810947]) - >>> numpy.allclose(R, rotation_matrix(0.123, (1, 0, 0))) - True - - """ - q = numpy.array(quaternion[:4], dtype=numpy.float64, copy=True) - nq = numpy.dot(q, q) - if nq < _EPS: - return numpy.identity(4) - q *= math.sqrt(2.0 / nq) - q = numpy.outer(q, q) - return numpy.array(( - (1.0-q[1, 1]-q[2, 2], q[0, 1]-q[2, 3], q[0, 2]+q[1, 3], 0.0), - ( q[0, 1]+q[2, 3], 1.0-q[0, 0]-q[2, 2], q[1, 2]-q[0, 3], 0.0), - ( q[0, 2]-q[1, 3], q[1, 2]+q[0, 3], 1.0-q[0, 0]-q[1, 1], 0.0), - ( 0.0, 0.0, 0.0, 1.0) - ), dtype=numpy.float64) - - -def quaternion_from_matrix(matrix): - """Return quaternion from rotation matrix. - - >>> R = rotation_matrix(0.123, (1, 2, 3)) - >>> q = quaternion_from_matrix(R) - >>> numpy.allclose(q, [0.0164262, 0.0328524, 0.0492786, 0.9981095]) - True - - """ - q = numpy.empty((4, ), dtype=numpy.float64) - M = numpy.array(matrix, dtype=numpy.float64, copy=False)[:4, :4] - t = numpy.trace(M) - if t > M[3, 3]: - q[3] = t - q[2] = M[1, 0] - M[0, 1] - q[1] = M[0, 2] - M[2, 0] - q[0] = M[2, 1] - M[1, 2] - else: - i, j, k = 0, 1, 2 - if M[1, 1] > M[0, 0]: - i, j, k = 1, 2, 0 - if M[2, 2] > M[i, i]: - i, j, k = 2, 0, 1 - t = M[i, i] - (M[j, j] + M[k, k]) + M[3, 3] - q[i] = t - q[j] = M[i, j] + M[j, i] - q[k] = M[k, i] + M[i, k] - q[3] = M[k, j] - M[j, k] - q *= 0.5 / math.sqrt(t * M[3, 3]) - return q - - -def quaternion_multiply(quaternion1, quaternion0): - """Return multiplication of two quaternions. 
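(Editorial aside, not part of the removed file.) The quaternion helpers above store quaternions as (x, y, z, w) with the scalar part last, matching ROS tf conventions; the small sketch below makes the layout explicit, again under the hypothetical module name `transformations`:

```python
# Illustrative sketch only; `transformations` is a hypothetical module name.
import math
import numpy
from transformations import quaternion_about_axis, quaternion_from_euler

# A 90-degree yaw: the scalar component w sits in the last slot.
q = quaternion_from_euler(0.0, 0.0, math.pi / 2.0)
assert numpy.allclose(q, [0.0, 0.0, math.sqrt(0.5), math.sqrt(0.5)])

# The same rotation expressed as an angle about the z axis.
assert numpy.allclose(q, quaternion_about_axis(math.pi / 2.0, (0, 0, 1)))
```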
- - >>> q = quaternion_multiply([1, -2, 3, 4], [-5, 6, 7, 8]) - >>> numpy.allclose(q, [-44, -14, 48, 28]) - True - - """ - x0, y0, z0, w0 = quaternion0 - x1, y1, z1, w1 = quaternion1 - return numpy.array(( - x1*w0 + y1*z0 - z1*y0 + w1*x0, - -x1*z0 + y1*w0 + z1*x0 + w1*y0, - x1*y0 - y1*x0 + z1*w0 + w1*z0, - -x1*x0 - y1*y0 - z1*z0 + w1*w0), dtype=numpy.float64) - - -def quaternion_conjugate(quaternion): - """Return conjugate of quaternion. - - >>> q0 = random_quaternion() - >>> q1 = quaternion_conjugate(q0) - >>> q1[3] == q0[3] and all(q1[:3] == -q0[:3]) - True - - """ - return numpy.array((-quaternion[0], -quaternion[1], - -quaternion[2], quaternion[3]), dtype=numpy.float64) - - -def quaternion_inverse(quaternion): - """Return inverse of quaternion. - - >>> q0 = random_quaternion() - >>> q1 = quaternion_inverse(q0) - >>> numpy.allclose(quaternion_multiply(q0, q1), [0, 0, 0, 1]) - True - - """ - return quaternion_conjugate(quaternion) / numpy.dot(quaternion, quaternion) - - -def quaternion_slerp(quat0, quat1, fraction, spin=0, shortestpath=True): - """Return spherical linear interpolation between two quaternions. - - >>> q0 = random_quaternion() - >>> q1 = random_quaternion() - >>> q = quaternion_slerp(q0, q1, 0.0) - >>> numpy.allclose(q, q0) - True - >>> q = quaternion_slerp(q0, q1, 1.0, 1) - >>> numpy.allclose(q, q1) - True - >>> q = quaternion_slerp(q0, q1, 0.5) - >>> angle = math.acos(numpy.dot(q0, q)) - >>> numpy.allclose(2.0, math.acos(numpy.dot(q0, q1)) / angle) or \ - numpy.allclose(2.0, math.acos(-numpy.dot(q0, q1)) / angle) - True - - """ - q0 = unit_vector(quat0[:4]) - q1 = unit_vector(quat1[:4]) - if fraction == 0.0: - return q0 - elif fraction == 1.0: - return q1 - d = numpy.dot(q0, q1) - if abs(abs(d) - 1.0) < _EPS: - return q0 - if shortestpath and d < 0.0: - # invert rotation - d = -d - q1 *= -1.0 - angle = math.acos(d) + spin * math.pi - if abs(angle) < _EPS: - return q0 - isin = 1.0 / math.sin(angle) - q0 *= math.sin((1.0 - fraction) * angle) * isin - q1 *= math.sin(fraction * angle) * isin - q0 += q1 - return q0 - - -def random_quaternion(rand=None): - """Return uniform random unit quaternion. - - rand: array like or None - Three independent random variables that are uniformly distributed - between 0 and 1. - - >>> q = random_quaternion() - >>> numpy.allclose(1.0, vector_norm(q)) - True - >>> q = random_quaternion(numpy.random.random(3)) - >>> q.shape - (4,) - - """ - if rand is None: - rand = numpy.random.rand(3) - else: - assert len(rand) == 3 - r1 = numpy.sqrt(1.0 - rand[0]) - r2 = numpy.sqrt(rand[0]) - pi2 = math.pi * 2.0 - t1 = pi2 * rand[1] - t2 = pi2 * rand[2] - return numpy.array((numpy.sin(t1)*r1, - numpy.cos(t1)*r1, - numpy.sin(t2)*r2, - numpy.cos(t2)*r2), dtype=numpy.float64) - - -def random_rotation_matrix(rand=None): - """Return uniform random rotation matrix. - - rnd: array like - Three independent random variables that are uniformly distributed - between 0 and 1 for each returned quaternion. - - >>> R = random_rotation_matrix() - >>> numpy.allclose(numpy.dot(R.T, R), numpy.identity(4)) - True - - """ - return quaternion_matrix(random_quaternion(rand)) - - -class Arcball(object): - """Virtual Trackball Control. 
- - >>> ball = Arcball() - >>> ball = Arcball(initial=numpy.identity(4)) - >>> ball.place([320, 320], 320) - >>> ball.down([500, 250]) - >>> ball.drag([475, 275]) - >>> R = ball.matrix() - >>> numpy.allclose(numpy.sum(R), 3.90583455) - True - >>> ball = Arcball(initial=[0, 0, 0, 1]) - >>> ball.place([320, 320], 320) - >>> ball.setaxes([1,1,0], [-1, 1, 0]) - >>> ball.setconstrain(True) - >>> ball.down([400, 200]) - >>> ball.drag([200, 400]) - >>> R = ball.matrix() - >>> numpy.allclose(numpy.sum(R), 0.2055924) - True - >>> ball.next() - - """ - - def __init__(self, initial=None): - """Initialize virtual trackball control. - - initial : quaternion or rotation matrix - - """ - self._axis = None - self._axes = None - self._radius = 1.0 - self._center = [0.0, 0.0] - self._vdown = numpy.array([0, 0, 1], dtype=numpy.float64) - self._constrain = False - - if initial is None: - self._qdown = numpy.array([0, 0, 0, 1], dtype=numpy.float64) - else: - initial = numpy.array(initial, dtype=numpy.float64) - if initial.shape == (4, 4): - self._qdown = quaternion_from_matrix(initial) - elif initial.shape == (4, ): - initial /= vector_norm(initial) - self._qdown = initial - else: - raise ValueError("initial not a quaternion or matrix.") - - self._qnow = self._qpre = self._qdown - - def place(self, center, radius): - """Place Arcball, e.g. when window size changes. - - center : sequence[2] - Window coordinates of trackball center. - radius : float - Radius of trackball in window coordinates. - - """ - self._radius = float(radius) - self._center[0] = center[0] - self._center[1] = center[1] - - def setaxes(self, *axes): - """Set axes to constrain rotations.""" - if axes is None: - self._axes = None - else: - self._axes = [unit_vector(axis) for axis in axes] - - def setconstrain(self, constrain): - """Set state of constrain to axis mode.""" - self._constrain = constrain == True - - def getconstrain(self): - """Return state of constrain to axis mode.""" - return self._constrain - - def down(self, point): - """Set initial cursor window coordinates and pick constrain-axis.""" - self._vdown = arcball_map_to_sphere(point, self._center, self._radius) - self._qdown = self._qpre = self._qnow - - if self._constrain and self._axes is not None: - self._axis = arcball_nearest_axis(self._vdown, self._axes) - self._vdown = arcball_constrain_to_axis(self._vdown, self._axis) - else: - self._axis = None - - def drag(self, point): - """Update current cursor window coordinates.""" - vnow = arcball_map_to_sphere(point, self._center, self._radius) - - if self._axis is not None: - vnow = arcball_constrain_to_axis(vnow, self._axis) - - self._qpre = self._qnow - - t = numpy.cross(self._vdown, vnow) - if numpy.dot(t, t) < _EPS: - self._qnow = self._qdown - else: - q = [t[0], t[1], t[2], numpy.dot(self._vdown, vnow)] - self._qnow = quaternion_multiply(q, self._qdown) - - def next(self, acceleration=0.0): - """Continue rotation in direction of last drag.""" - q = quaternion_slerp(self._qpre, self._qnow, 2.0+acceleration, False) - self._qpre, self._qnow = self._qnow, q - - def matrix(self): - """Return homogeneous rotation matrix.""" - return quaternion_matrix(self._qnow) - - -def arcball_map_to_sphere(point, center, radius): - """Return unit sphere coordinates from window coordinates.""" - v = numpy.array(((point[0] - center[0]) / radius, - (center[1] - point[1]) / radius, - 0.0), dtype=numpy.float64) - n = v[0]*v[0] + v[1]*v[1] - if n > 1.0: - v /= math.sqrt(n) # position outside of sphere - else: - v[2] = math.sqrt(1.0 - n) - return v - 
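(Editorial aside, not part of the removed file.) The Arcball.drag() logic above reduces to mapping two cursor positions onto the unit sphere and building a quaternion from their cross and dot products; note the classic arcball property that the encoded rotation angle is twice the angle between the two sphere vectors. A minimal sketch, assuming the hypothetical module name `transformations`:

```python
# Illustrative sketch only; `transformations` is a hypothetical module name.
import numpy
from transformations import arcball_map_to_sphere, quaternion_matrix

center, radius = (320, 320), 320.0
v0 = arcball_map_to_sphere((500, 250), center, radius)  # press position
v1 = arcball_map_to_sphere((475, 275), center, radius)  # drag position

# Quaternion (x, y, z, w): vector part from the cross product, scalar part
# from the dot product; the encoded rotation is about the v0 x v1 axis,
# by twice the angle separating v0 and v1.
q = numpy.append(numpy.cross(v0, v1), numpy.dot(v0, v1))
R = quaternion_matrix(q)
assert numpy.allclose(numpy.linalg.det(R[:3, :3]), 1.0)  # proper rotation
```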
- -def arcball_constrain_to_axis(point, axis): - """Return sphere point perpendicular to axis.""" - v = numpy.array(point, dtype=numpy.float64, copy=True) - a = numpy.array(axis, dtype=numpy.float64, copy=True) - v -= a * numpy.dot(a, v) # on plane - n = vector_norm(v) - if n > _EPS: - if v[2] < 0.0: - v *= -1.0 - v /= n - return v - if a[2] == 1.0: - return numpy.array([1, 0, 0], dtype=numpy.float64) - return unit_vector([-a[1], a[0], 0]) - - -def arcball_nearest_axis(point, axes): - """Return axis, which arc is nearest to point.""" - point = numpy.array(point, dtype=numpy.float64, copy=False) - nearest = None - mx = -1.0 - for axis in axes: - t = numpy.dot(arcball_constrain_to_axis(point, axis), point) - if t > mx: - nearest = axis - mx = t - return nearest - - -# epsilon for testing whether a number is close to zero -_EPS = numpy.finfo(float).eps * 4.0 - -# axis sequences for Euler angles -_NEXT_AXIS = [1, 2, 0, 1] - -# map axes strings to/from tuples of inner axis, parity, repetition, frame -_AXES2TUPLE = { - 'sxyz': (0, 0, 0, 0), 'sxyx': (0, 0, 1, 0), 'sxzy': (0, 1, 0, 0), - 'sxzx': (0, 1, 1, 0), 'syzx': (1, 0, 0, 0), 'syzy': (1, 0, 1, 0), - 'syxz': (1, 1, 0, 0), 'syxy': (1, 1, 1, 0), 'szxy': (2, 0, 0, 0), - 'szxz': (2, 0, 1, 0), 'szyx': (2, 1, 0, 0), 'szyz': (2, 1, 1, 0), - 'rzyx': (0, 0, 0, 1), 'rxyx': (0, 0, 1, 1), 'ryzx': (0, 1, 0, 1), - 'rxzx': (0, 1, 1, 1), 'rxzy': (1, 0, 0, 1), 'ryzy': (1, 0, 1, 1), - 'rzxy': (1, 1, 0, 1), 'ryxy': (1, 1, 1, 1), 'ryxz': (2, 0, 0, 1), - 'rzxz': (2, 0, 1, 1), 'rxyz': (2, 1, 0, 1), 'rzyz': (2, 1, 1, 1)} - -_TUPLE2AXES = dict((v, k) for k, v in _AXES2TUPLE.items()) - -# helper functions - -def vector_norm(data, axis=None, out=None): - """Return length, i.e. eucledian norm, of ndarray along axis. - - >>> v = numpy.random.random(3) - >>> n = vector_norm(v) - >>> numpy.allclose(n, numpy.linalg.norm(v)) - True - >>> v = numpy.random.rand(6, 5, 3) - >>> n = vector_norm(v, axis=-1) - >>> numpy.allclose(n, numpy.sqrt(numpy.sum(v*v, axis=2))) - True - >>> n = vector_norm(v, axis=1) - >>> numpy.allclose(n, numpy.sqrt(numpy.sum(v*v, axis=1))) - True - >>> v = numpy.random.rand(5, 4, 3) - >>> n = numpy.empty((5, 3), dtype=numpy.float64) - >>> vector_norm(v, axis=1, out=n) - >>> numpy.allclose(n, numpy.sqrt(numpy.sum(v*v, axis=1))) - True - >>> vector_norm([]) - 0.0 - >>> vector_norm([1.0]) - 1.0 - - """ - data = numpy.array(data, dtype=numpy.float64, copy=True) - if out is None: - if data.ndim == 1: - return math.sqrt(numpy.dot(data, data)) - data *= data - out = numpy.atleast_1d(numpy.sum(data, axis=axis)) - numpy.sqrt(out, out) - return out - else: - data *= data - numpy.sum(data, axis=axis, out=out) - numpy.sqrt(out, out) - - -def unit_vector(data, axis=None, out=None): - """Return ndarray normalized by length, i.e. eucledian norm, along axis. 
- - >>> v0 = numpy.random.random(3) - >>> v1 = unit_vector(v0) - >>> numpy.allclose(v1, v0 / numpy.linalg.norm(v0)) - True - >>> v0 = numpy.random.rand(5, 4, 3) - >>> v1 = unit_vector(v0, axis=-1) - >>> v2 = v0 / numpy.expand_dims(numpy.sqrt(numpy.sum(v0*v0, axis=2)), 2) - >>> numpy.allclose(v1, v2) - True - >>> v1 = unit_vector(v0, axis=1) - >>> v2 = v0 / numpy.expand_dims(numpy.sqrt(numpy.sum(v0*v0, axis=1)), 1) - >>> numpy.allclose(v1, v2) - True - >>> v1 = numpy.empty((5, 4, 3), dtype=numpy.float64) - >>> unit_vector(v0, axis=1, out=v1) - >>> numpy.allclose(v1, v2) - True - >>> list(unit_vector([])) - [] - >>> list(unit_vector([1.0])) - [1.0] - - """ - if out is None: - data = numpy.array(data, dtype=numpy.float64, copy=True) - if data.ndim == 1: - data /= math.sqrt(numpy.dot(data, data)) - return data - else: - if out is not data: - out[:] = numpy.array(data, copy=False) - data = out - length = numpy.atleast_1d(numpy.sum(data*data, axis)) - numpy.sqrt(length, length) - if axis is not None: - length = numpy.expand_dims(length, axis) - data /= length - if out is None: - return data - - -def random_vector(size): - """Return array of random doubles in the half-open interval [0.0, 1.0). - - >>> v = random_vector(10000) - >>> numpy.all(v >= 0.0) and numpy.all(v < 1.0) - True - >>> v0 = random_vector(10) - >>> v1 = random_vector(10) - >>> numpy.any(v0 == v1) - False - - """ - return numpy.random.random(size) - - -def inverse_matrix(matrix): - """Return inverse of square transformation matrix. - - >>> M0 = random_rotation_matrix() - >>> M1 = inverse_matrix(M0.T) - >>> numpy.allclose(M1, numpy.linalg.inv(M0.T)) - True - >>> for size in range(1, 7): - ... M0 = numpy.random.rand(size, size) - ... M1 = inverse_matrix(M0) - ... if not numpy.allclose(M1, numpy.linalg.inv(M0)): print size - - """ - return numpy.linalg.inv(matrix) - - -def concatenate_matrices(*matrices): - """Return concatenation of series of transformation matrices. - - >>> M = numpy.random.rand(16).reshape((4, 4)) - 0.5 - >>> numpy.allclose(M, concatenate_matrices(M)) - True - >>> numpy.allclose(numpy.dot(M, M.T), concatenate_matrices(M, M.T)) - True - - """ - M = numpy.identity(4) - for i in matrices: - M = numpy.dot(M, i) - return M - - -def is_same_transform(matrix0, matrix1): - """Return True if two matrices perform same transformation. - - >>> is_same_transform(numpy.identity(4), numpy.identity(4)) - True - >>> is_same_transform(numpy.identity(4), random_rotation_matrix()) - False - - """ - matrix0 = numpy.array(matrix0, dtype=numpy.float64, copy=True) - matrix0 /= matrix0[3, 3] - matrix1 = numpy.array(matrix1, dtype=numpy.float64, copy=True) - matrix1 /= matrix1[3, 3] - return numpy.allclose(matrix0, matrix1) - - -def _import_module(module_name, warn=True, prefix='_py_', ignore='_'): - """Try import all public attributes from module into global namespace. - - Existing attributes with name clashes are renamed with prefix. - Attributes starting with underscore are ignored by default. - - Return True on successful import. 
- - """ - try: - module = __import__(module_name) - except ImportError: - if warn: - warnings.warn("Failed to import module " + module_name) - else: - for attr in dir(module): - if ignore and attr.startswith(ignore): - continue - if prefix: - if attr in globals(): - globals()[prefix + attr] = globals()[attr] - elif warn: - warnings.warn("No Python implementation of " + attr) - globals()[attr] = getattr(module, attr) - return True \ No newline at end of file diff --git a/legacy/common_math/srv/TfTransform.srv b/legacy/common_math/srv/TfTransform.srv deleted file mode 100644 index 9d8833524..000000000 --- a/legacy/common_math/srv/TfTransform.srv +++ /dev/null @@ -1,8 +0,0 @@ -geometry_msgs/PoseArray pose_array -sensor_msgs/PointCloud2 pointcloud -geometry_msgs/PointStamped point -std_msgs/String target_frame ---- -geometry_msgs/PoseArray target_pose_array -sensor_msgs/PointCloud2 target_pointcloud -geometry_msgs/PointStamped target_point \ No newline at end of file diff --git a/legacy/cv_bridge3/README.md b/legacy/cv_bridge3/README.md deleted file mode 100644 index f982ff4e2..000000000 --- a/legacy/cv_bridge3/README.md +++ /dev/null @@ -1,50 +0,0 @@ -# cv_bridge3 - -The cv_bridge3 package - -This package is maintained by: -- [jared](mailto:jared@todo.todo) - -## Prerequisites - -This package depends on the following ROS packages: -- catkin (buildtool) -- catkin_virtualenv (build) - -This packages requires Python 3.10 to be present. - -This package has 2 Python dependencies: -- [opencv-python](https://pypi.org/project/opencv-python)==4.8.0.76 -- .. and 1 sub dependencies - -Ask the package maintainer to write or create a blank `doc/PREREQUISITES.md` for their package! - -## Usage - -Ask the package maintainer to write a `doc/USAGE.md` for their package! - -## Example - -Ask the package maintainer to write a `doc/EXAMPLE.md` for their package! - -## Technical Overview - -Ask the package maintainer to write a `doc/TECHNICAL.md` for their package! - -## ROS Definitions - -### Launch Files - -This package has no launch files. - -### Messages - -This package has no messages. - -### Services - -This package has no services. - -### Actions - -This package has no actions. 
diff --git a/legacy/cv_bridge3/requirements.in b/legacy/cv_bridge3/requirements.in deleted file mode 100644 index 6c31834e2..000000000 --- a/legacy/cv_bridge3/requirements.in +++ /dev/null @@ -1 +0,0 @@ -opencv-python==4.8.0.76 diff --git a/legacy/cv_bridge3/requirements.txt b/legacy/cv_bridge3/requirements.txt deleted file mode 100644 index cd1e4c13c..000000000 --- a/legacy/cv_bridge3/requirements.txt +++ /dev/null @@ -1,2 +0,0 @@ -numpy==1.25.2 # via opencv-python -opencv-python==4.8.0.76 # via -r requirements.in diff --git a/legacy/cv_bridge3/setup.py b/legacy/cv_bridge3/setup.py deleted file mode 100644 index 3fb001cbc..000000000 --- a/legacy/cv_bridge3/setup.py +++ /dev/null @@ -1,12 +0,0 @@ -#!/usr/bin/env python - -from distutils.core import setup -from catkin_pkg.python_setup import generate_distutils_setup - -# fetch values from package.xml -setup_args = generate_distutils_setup( - packages=['cv_bridge3'], - package_dir={'': 'src'} -) - -setup(**setup_args) \ No newline at end of file diff --git a/legacy/cv_bridge3/src/cv_bridge3/__init__.py b/legacy/cv_bridge3/src/cv_bridge3/__init__.py deleted file mode 100644 index c934c3c44..000000000 --- a/legacy/cv_bridge3/src/cv_bridge3/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -from cv_bridge3.cv_bridge3 import CvBridge -from cv_bridge3.cv_bridge3 import cv2 diff --git a/legacy/cv_bridge3/src/cv_bridge3/cv_bridge3.py b/legacy/cv_bridge3/src/cv_bridge3/cv_bridge3.py deleted file mode 100644 index b8140e71b..000000000 --- a/legacy/cv_bridge3/src/cv_bridge3/cv_bridge3.py +++ /dev/null @@ -1,222 +0,0 @@ -# From https://answers.ros.org/question/350904/cv_bridge-throws-boost-import-error-in-python-3-and-ros-melodic/?answer=374931#post-id-374931 -# Fix for cv_bridge not working in Python 3 without recompilation. - -""" - Provides conversions between OpenCV and ROS image formats in a hard-coded way. - CV_Bridge, the module usually responsible for doing this, is not compatible with Python 3, - - the language this all is written in. So we create this module, and all is... well, all is not well, - - but all works. 
:-/ - Author(s): Gerard Canal -""" -import sys -import numpy as np -import sensor_msgs -import rospy - -# Hack to import cv2 from https://answers.ros.org/question/290660/import-cv2-error-caused-by-ros/?answer=331764#post-id-331764 -import sys -melodic_dist_pkg = '/opt/ros/melodic/lib/python2.7/dist-packages' -is_melodic = melodic_dist_pkg in sys.path -if is_melodic: - sys.path.remove(melodic_dist_pkg) # in order to import cv2 under python3 -import cv2 -if is_melodic: - sys.path.append(melodic_dist_pkg) # append back in order to import rospy - - -class CvBridge: - def __init__(self): - import cv2 - self.cvtype_to_name = {} - self.cvdepth_to_numpy_depth = {cv2.CV_8U: 'uint8', cv2.CV_8S: 'int8', cv2.CV_16U: 'uint16', - cv2.CV_16S: 'int16', cv2.CV_32S:'int32', cv2.CV_32F:'float32', - cv2.CV_64F: 'float64'} - - for t in ["8U", "8S", "16U", "16S", "32S", "32F", "64F"]: - for c in [1, 2, 3, 4]: - nm = "%sC%d" % (t, c) - self.cvtype_to_name[getattr(cv2, "CV_%s" % nm)] = nm - - self.numpy_type_to_cvtype = {'uint8': '8U', 'int8': '8S', 'uint16': '16U', - 'int16': '16S', 'int32': '32S', 'float32': '32F', - 'float64': '64F'} - self.numpy_type_to_cvtype.update(dict((v, k) for (k, v) in self.numpy_type_to_cvtype.items())) - - def dtype_with_channels_to_cvtype2(self, dtype, n_channels): - return '%sC%d' % (self.numpy_type_to_cvtype[dtype.name], n_channels) - - def cvtype2_to_dtype_with_channels(self, cvtype): - from cv_bridge.boost.cv_bridge_boost import CV_MAT_CNWrap, CV_MAT_DEPTHWrap - return self.cvdepth_to_numpy_depth[CV_MAT_DEPTHWrap(cvtype)], CV_MAT_CNWrap(cvtype) - - def encoding_to_cvtype2(self, encoding): - from cv_bridge.boost.cv_bridge_boost import getCvType - - try: - return getCvType(encoding) - except RuntimeError as e: - raise CvBridgeError(e) - - def encoding_to_dtype_with_channels(self, encoding): - return self.cvtype2_to_dtype_with_channels(self.encoding_to_cvtype2(encoding)) - - def compressed_imgmsg_to_cv2(self, cmprs_img_msg, desired_encoding = "passthrough"): - """ - Convert a sensor_msgs::CompressedImage message to an OpenCV :cpp:type:`cv::Mat`. - :param cmprs_img_msg: A :cpp:type:`sensor_msgs::CompressedImage` message - :param desired_encoding: The encoding of the image data, one of the following strings: - * ``"passthrough"`` - * one of the standard strings in sensor_msgs/image_encodings.h - :rtype: :cpp:type:`cv::Mat` - :raises CvBridgeError: when conversion is not possible. - If desired_encoding is ``"passthrough"``, then the returned image has the same format as img_msg. - Otherwise desired_encoding must be one of the standard image encodings - This function returns an OpenCV :cpp:type:`cv::Mat` message on success, or raises :exc:`cv_bridge.CvBridgeError` on failure. 
- If the image only has one channel, the shape has size 2 (width and height) - """ - import cv2 - import numpy as np - - str_msg = cmprs_img_msg.data - buf = np.ndarray(shape=(1, len(str_msg)), - dtype=np.uint8, buffer=cmprs_img_msg.data) - im = cv2.imdecode(buf, cv2.IMREAD_UNCHANGED) - - if desired_encoding == "passthrough": - return im - - from cv_bridge.boost.cv_bridge_boost import cvtColor2 - - try: - res = cvtColor2(im, "bgr8", desired_encoding) - except RuntimeError as e: - raise CvBridgeError(e) - - return res - - def imgmsg_to_cv2_np(self, img_msg): - im = np.frombuffer(img_msg.data, dtype=np.uint8).reshape(img_msg.height, img_msg.width, -1) - print(img_msg.encoding) - if img_msg.encoding == 'rgb8': - return cv2.cvtColor(im, cv2.COLOR_RGB2BGR) - elif img_msg.encoding in ["bgr8", "8UC3", "mono8"]: - return im - - - def imgmsg_to_cv2(self, img_msg, desired_encoding = "passthrough"): - """ - Convert a sensor_msgs::Image message to an OpenCV :cpp:type:`cv::Mat`. - :param img_msg: A :cpp:type:`sensor_msgs::Image` message - :param desired_encoding: The encoding of the image data, one of the following strings: - * ``"passthrough"`` - * one of the standard strings in sensor_msgs/image_encodings.h - :rtype: :cpp:type:`cv::Mat` - :raises CvBridgeError: when conversion is not possible. - If desired_encoding is ``"passthrough"``, then the returned image has the same format as img_msg. - Otherwise desired_encoding must be one of the standard image encodings - This function returns an OpenCV :cpp:type:`cv::Mat` message on success, or raises :exc:`cv_bridge.CvBridgeError` on failure. - If the image only has one channel, the shape has size 2 (width and height) - """ - import cv2 - import numpy as np - dtype, n_channels = self.encoding_to_dtype_with_channels(img_msg.encoding) - dtype = np.dtype(dtype) - dtype = dtype.newbyteorder('>' if img_msg.is_bigendian else '<') - if n_channels == 1: - im = np.ndarray(shape=(img_msg.height, img_msg.width), - dtype=dtype, buffer=img_msg.data) - else: - if(type(img_msg.data) == str): - im = np.ndarray(shape=(img_msg.height, img_msg.width, n_channels), - dtype=dtype, buffer=img_msg.data.encode()) - else: - im = np.ndarray(shape=(img_msg.height, img_msg.width, n_channels), - dtype=dtype, buffer=img_msg.data) - # If the byte order is different between the message and the system. - if img_msg.is_bigendian == (sys.byteorder == 'little'): - im = im.byteswap().newbyteorder() - - if desired_encoding == "passthrough": - return im - - from cv_bridge.boost.cv_bridge_boost import cvtColor2 - - try: - res = cvtColor2(im, img_msg.encoding, desired_encoding) - except RuntimeError as e: - raise CvBridgeError(e) - - return res - - def cv2_to_compressed_imgmsg(self, cvim, dst_format = "jpg"): - """ - Convert an OpenCV :cpp:type:`cv::Mat` type to a ROS sensor_msgs::CompressedImage message. 
- :param cvim: An OpenCV :cpp:type:`cv::Mat` - :param dst_format: The format of the image data, one of the following strings: - * from http://docs.opencv.org/2.4/modules/highgui/doc/reading_and_writing_images_and_video.html - * from http://docs.opencv.org/2.4/modules/highgui/doc/reading_and_writing_images_and_video.html#Mat imread(const string& filename, int flags) - * bmp, dib - * jpeg, jpg, jpe - * jp2 - * png - * pbm, pgm, ppm - * sr, ras - * tiff, tif - :rtype: A sensor_msgs.msg.CompressedImage message - :raises CvBridgeError: when the ``cvim`` has a type that is incompatible with ``format`` - This function returns a sensor_msgs::Image message on success, or raises :exc:`cv_bridge.CvBridgeError` on failure. - """ - import cv2 - import numpy as np - if not isinstance(cvim, (np.ndarray, np.generic)): - raise TypeError('Your input type is not a numpy array') - cmprs_img_msg = sensor_msgs.msg.CompressedImage() - cmprs_img_msg.format = dst_format - ext_format = '.' + dst_format - try: - cmprs_img_msg.data = np.array(cv2.imencode(ext_format, cvim)[1]).tostring() - except RuntimeError as e: - raise CvBridgeError(e) - - return cmprs_img_msg - - def cv2_to_imgmsg(self, cvim, encoding = "passthrough", header = None): - """ - Convert an OpenCV :cpp:type:`cv::Mat` type to a ROS sensor_msgs::Image message. - :param cvim: An OpenCV :cpp:type:`cv::Mat` - :param encoding: The encoding of the image data, one of the following strings: - * ``"passthrough"`` - * one of the standard strings in sensor_msgs/image_encodings.h - :param header: A std_msgs.msg.Header message - :rtype: A sensor_msgs.msg.Image message - :raises CvBridgeError: when the ``cvim`` has a type that is incompatible with ``encoding`` - If encoding is ``"passthrough"``, then the message has the same encoding as the image's OpenCV type. - Otherwise desired_encoding must be one of the standard image encodings - This function returns a sensor_msgs::Image message on success, or raises :exc:`cv_bridge.CvBridgeError` on failure. 
- """ - import cv2 - import numpy as np - if not isinstance(cvim, (np.ndarray, np.generic)): - raise TypeError('Your input type is not a numpy array') - img_msg = sensor_msgs.msg.Image() - img_msg.height = cvim.shape[0] - img_msg.width = cvim.shape[1] - if header is not None: - img_msg.header = header - if len(cvim.shape) < 3: - cv_type = self.dtype_with_channels_to_cvtype2(cvim.dtype, 1) - else: - cv_type = self.dtype_with_channels_to_cvtype2(cvim.dtype, cvim.shape[2]) - if encoding == "passthrough": - img_msg.encoding = cv_type - else: - img_msg.encoding = encoding - # Verify that the supplied encoding is compatible with the type of the OpenCV image - if self.cvtype_to_name[self.encoding_to_cvtype2(encoding)] != cv_type: - raise CvBridgeError("encoding specified as %s, but image has incompatible type %s" % (encoding, cv_type)) - if cvim.dtype.byteorder == '>': - img_msg.is_bigendian = True - img_msg.data = cvim.tostring() - img_msg.step = len(img_msg.data) // img_msg.height - - return img_msg \ No newline at end of file diff --git a/legacy/face_detection/CMakeLists.txt b/legacy/face_detection/CMakeLists.txt deleted file mode 100644 index f54e4bdd9..000000000 --- a/legacy/face_detection/CMakeLists.txt +++ /dev/null @@ -1,208 +0,0 @@ -cmake_minimum_required(VERSION 3.0.2) -project(face_detection) - -## Compile as C++11, supported in ROS Kinetic and newer -# add_compile_options(-std=c++11) - -## Find catkin macros and libraries -## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz) -## is used, also find other catkin packages -find_package(catkin REQUIRED COMPONENTS - rospy - std_msgs - sensor_msgs - message_runtime - message_generation - lasr_vision_msgs -) - -## System dependencies are found with CMake's conventions -# find_package(Boost REQUIRED COMPONENTS system) - -## Uncomment this if the package has a setup.py. This macro ensures -## modules and global scripts declared therein get installed -## See http://ros.org/doc/api/catkin/html/user_guide/setup_dot_py.html -catkin_python_setup() - -################################################ -## Declare ROS messages, services and actions ## -################################################ - -## To declare and build messages, services or actions from within this -## package, follow these steps: -## * Let MSG_DEP_SET be the set of packages whose message types you use in -## your messages/services/actions (e.g. std_msgs, actionlib_msgs, ...). -## * In the file package.xml: -## * add a build_depend tag for "message_generation" -## * add a build_depend and a exec_depend tag for each package in MSG_DEP_SET -## * If MSG_DEP_SET isn't empty the following dependency has been pulled in -## but can be declared for certainty nonetheless: -## * add a exec_depend tag for "message_runtime" -## * In this file (CMakeLists.txt): -## * add "message_generation" and every package in MSG_DEP_SET to -## find_package(catkin REQUIRED COMPONENTS ...) -## * add "message_runtime" and every package in MSG_DEP_SET to -## catkin_package(CATKIN_DEPENDS ...) -## * uncomment the add_*_files sections below as needed -## and list every .msg/.srv/.action file to be processed -## * uncomment the generate_messages entry below -## * add every package in MSG_DEP_SET to generate_messages(DEPENDENCIES ...) 
- -## Generate messages in the 'msg' folder -#add_message_files( -# FILES -# Detection.msg -#) - -## Generate services in the 'srv' folder -add_service_files( - FILES - FaceDetection.srv -) - -## Generate actions in the 'action' folder -# add_action_files( -# FILES -# Action1.action -# Action2.action -# ) - -## Generate added messages and services with any dependencies listed here -generate_messages( - DEPENDENCIES - std_msgs - sensor_msgs - lasr_vision_msgs -) - -################################################ -## Declare ROS dynamic reconfigure parameters ## -################################################ - -## To declare and build dynamic reconfigure parameters within this -## package, follow these steps: -## * In the file package.xml: -## * add a build_depend and a exec_depend tag for "dynamic_reconfigure" -## * In this file (CMakeLists.txt): -## * add "dynamic_reconfigure" to -## find_package(catkin REQUIRED COMPONENTS ...) -## * uncomment the "generate_dynamic_reconfigure_options" section below -## and list every .cfg file to be processed - -## Generate dynamic reconfigure parameters in the 'cfg' folder -# generate_dynamic_reconfigure_options( -# cfg/DynReconf1.cfg -# cfg/DynReconf2.cfg -# ) - -################################### -## catkin specific configuration ## -################################### -## The catkin_package macro generates cmake config files for your package -## Declare things to be passed to dependent projects -## INCLUDE_DIRS: uncomment this if your package contains header files -## LIBRARIES: libraries you create in this project that dependent projects also need -## CATKIN_DEPENDS: catkin_packages dependent projects also need -## DEPENDS: system dependencies of this project that dependent projects also need -catkin_package( -# INCLUDE_DIRS include -# LIBRARIES lasr_object_detection_yolo -# CATKIN_DEPENDS rospy std_msgs -# DEPENDS system_lib -) - -########### -## Build ## -########### - -## Specify additional locations of header files -## Your package locations should be listed before other locations -include_directories( -# include - ${catkin_INCLUDE_DIRS} -) - -## Declare a C++ library -# add_library(${PROJECT_NAME} -# src/${PROJECT_NAME}/lasr_object_detection_yolo.cpp -# ) - -## Add cmake target dependencies of the library -## as an example, code may need to be generated before libraries -## either from message generation or dynamic reconfigure -# add_dependencies(${PROJECT_NAME} ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS}) - -## Declare a C++ executable -## With catkin_make all packages are built within a single CMake context -## The recommended prefix ensures that target names across packages don't collide -# add_executable(${PROJECT_NAME}_node src/lasr_object_detection_yolo_node.cpp) - -## Rename C++ executable without prefix -## The above recommended prefix causes long target names, the following renames the -## target back to the shorter version for ease of user use -## e.g. 
"rosrun someones_pkg node" instead of "rosrun someones_pkg someones_pkg_node" -# set_target_properties(${PROJECT_NAME}_node PROPERTIES OUTPUT_NAME node PREFIX "") - -## Add cmake target dependencies of the executable -## same as for the library above -# add_dependencies(${PROJECT_NAME}_node ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS}) - -## Specify libraries to link a library or executable target against -# target_link_libraries(${PROJECT_NAME}_node -# ${catkin_LIBRARIES} -# ) - -############# -## Install ## -############# - -# all install targets should use catkin DESTINATION variables -# See http://ros.org/doc/api/catkin/html/adv_user_guide/variables.html - -## Mark executable scripts (Python etc.) for installation -## in contrast to setup.py, you can choose the destination -# catkin_install_python(PROGRAMS -# scripts/my_python_script -# DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} -# ) - -## Mark executables for installation -## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_executables.html -# install(TARGETS ${PROJECT_NAME}_node -# RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} -# ) - -## Mark libraries for installation -## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_libraries.html -# install(TARGETS ${PROJECT_NAME} -# ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} -# LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} -# RUNTIME DESTINATION ${CATKIN_GLOBAL_BIN_DESTINATION} -# ) - -## Mark cpp header files for installation -# install(DIRECTORY include/${PROJECT_NAME}/ -# DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION} -# FILES_MATCHING PATTERN "*.h" -# PATTERN ".svn" EXCLUDE -# ) - -## Mark other files for installation (e.g. launch and bag files, etc.) -# install(FILES -# # myfile1 -# # myfile2 -# DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION} -# ) - -############# -## Testing ## -############# - -## Add gtest based cpp test target and link libraries -# catkin_add_gtest(${PROJECT_NAME}-test test/test_lasr_object_detection_yolo.cpp) -# if(TARGET ${PROJECT_NAME}-test) -# target_link_libraries(${PROJECT_NAME}-test ${PROJECT_NAME}) -# endif() - -## Add folders to be run by python nosetests -# catkin_add_nosetests(test) diff --git a/legacy/face_detection/README.md b/legacy/face_detection/README.md deleted file mode 100644 index ea1f83c96..000000000 --- a/legacy/face_detection/README.md +++ /dev/null @@ -1,75 +0,0 @@ -# face_detection - -The face_detection package - -This package is maintained by: -- [nicole](mailto:nicole@todo.todo) - -## Prerequisites - -This package depends on the following ROS packages: -- catkin (buildtool) -- rospy (build) -- std_msgs (build) -- rospy (exec) -- std_msgs (exec) -- lasr_vision_msgs - -Ask the package maintainer to write or create a blank `doc/PREREQUISITES.md` for their package! - -## Usage - -Ask the package maintainer to write a `doc/USAGE.md` for their package! - -## Example - -Ask the package maintainer to write a `doc/EXAMPLE.md` for their package! - -## Technical Overview - -Ask the package maintainer to write a `doc/TECHNICAL.md` for their package! - -## ROS Definitions - -### Launch Files - -This package has no launch files. - -### Messages - -This package has no messages. 
- -### Services - -#### `FaceDetection` - -Request - -| Field | Type | Description | -|:-:|:-:|---| -| image_raw | sensor_msgs/Image | | - -Response - -| Field | Type | Description | -|:-:|:-:|---| -| detected_objects | lasr_vision_msgs/Detection[] | | - -#### `FaceDetectionPCL` - -Request - -| Field | Type | Description | -|:-:|:-:|---| -| cloud | sensor_msgs/PointCloud2 | | - -Response - -| Field | Type | Description | -|:-:|:-:|---| -| detections | face_detection/DetectionPCL[] | | - - -### Actions - -This package has no actions. diff --git a/legacy/face_detection/caffe_model/deploy.prototxt b/legacy/face_detection/caffe_model/deploy.prototxt deleted file mode 100644 index 77a4bdb1f..000000000 --- a/legacy/face_detection/caffe_model/deploy.prototxt +++ /dev/null @@ -1,1789 +0,0 @@ -input: "data" -input_shape { - dim: 1 - dim: 3 - dim: 300 - dim: 300 -} - -layer { - name: "data_bn" - type: "BatchNorm" - bottom: "data" - top: "data_bn" - param { - lr_mult: 0.0 - } - param { - lr_mult: 0.0 - } - param { - lr_mult: 0.0 - } -} -layer { - name: "data_scale" - type: "Scale" - bottom: "data_bn" - top: "data_bn" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - param { - lr_mult: 2.0 - decay_mult: 1.0 - } - scale_param { - bias_term: true - } -} -layer { - name: "conv1_h" - type: "Convolution" - bottom: "data_bn" - top: "conv1_h" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - param { - lr_mult: 2.0 - decay_mult: 1.0 - } - convolution_param { - num_output: 32 - pad: 3 - kernel_size: 7 - stride: 2 - weight_filler { - type: "msra" - variance_norm: FAN_OUT - } - bias_filler { - type: "constant" - value: 0.0 - } - } -} -layer { - name: "conv1_bn_h" - type: "BatchNorm" - bottom: "conv1_h" - top: "conv1_h" - param { - lr_mult: 0.0 - } - param { - lr_mult: 0.0 - } - param { - lr_mult: 0.0 - } -} -layer { - name: "conv1_scale_h" - type: "Scale" - bottom: "conv1_h" - top: "conv1_h" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - param { - lr_mult: 2.0 - decay_mult: 1.0 - } - scale_param { - bias_term: true - } -} -layer { - name: "conv1_relu" - type: "ReLU" - bottom: "conv1_h" - top: "conv1_h" -} -layer { - name: "conv1_pool" - type: "Pooling" - bottom: "conv1_h" - top: "conv1_pool" - pooling_param { - kernel_size: 3 - stride: 2 - } -} -layer { - name: "layer_64_1_conv1_h" - type: "Convolution" - bottom: "conv1_pool" - top: "layer_64_1_conv1_h" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - convolution_param { - num_output: 32 - bias_term: false - pad: 1 - kernel_size: 3 - stride: 1 - weight_filler { - type: "msra" - } - bias_filler { - type: "constant" - value: 0.0 - } - } -} -layer { - name: "layer_64_1_bn2_h" - type: "BatchNorm" - bottom: "layer_64_1_conv1_h" - top: "layer_64_1_conv1_h" - param { - lr_mult: 0.0 - } - param { - lr_mult: 0.0 - } - param { - lr_mult: 0.0 - } -} -layer { - name: "layer_64_1_scale2_h" - type: "Scale" - bottom: "layer_64_1_conv1_h" - top: "layer_64_1_conv1_h" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - param { - lr_mult: 2.0 - decay_mult: 1.0 - } - scale_param { - bias_term: true - } -} -layer { - name: "layer_64_1_relu2" - type: "ReLU" - bottom: "layer_64_1_conv1_h" - top: "layer_64_1_conv1_h" -} -layer { - name: "layer_64_1_conv2_h" - type: "Convolution" - bottom: "layer_64_1_conv1_h" - top: "layer_64_1_conv2_h" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - convolution_param { - num_output: 32 - bias_term: false - pad: 1 - kernel_size: 3 - stride: 1 - weight_filler { - type: "msra" - } - bias_filler { - type: "constant" - value: 0.0 - } - } -} -layer { - name: 
"layer_64_1_sum" - type: "Eltwise" - bottom: "layer_64_1_conv2_h" - bottom: "conv1_pool" - top: "layer_64_1_sum" -} -layer { - name: "layer_128_1_bn1_h" - type: "BatchNorm" - bottom: "layer_64_1_sum" - top: "layer_128_1_bn1_h" - param { - lr_mult: 0.0 - } - param { - lr_mult: 0.0 - } - param { - lr_mult: 0.0 - } -} -layer { - name: "layer_128_1_scale1_h" - type: "Scale" - bottom: "layer_128_1_bn1_h" - top: "layer_128_1_bn1_h" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - param { - lr_mult: 2.0 - decay_mult: 1.0 - } - scale_param { - bias_term: true - } -} -layer { - name: "layer_128_1_relu1" - type: "ReLU" - bottom: "layer_128_1_bn1_h" - top: "layer_128_1_bn1_h" -} -layer { - name: "layer_128_1_conv1_h" - type: "Convolution" - bottom: "layer_128_1_bn1_h" - top: "layer_128_1_conv1_h" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - convolution_param { - num_output: 128 - bias_term: false - pad: 1 - kernel_size: 3 - stride: 2 - weight_filler { - type: "msra" - } - bias_filler { - type: "constant" - value: 0.0 - } - } -} -layer { - name: "layer_128_1_bn2" - type: "BatchNorm" - bottom: "layer_128_1_conv1_h" - top: "layer_128_1_conv1_h" - param { - lr_mult: 0.0 - } - param { - lr_mult: 0.0 - } - param { - lr_mult: 0.0 - } -} -layer { - name: "layer_128_1_scale2" - type: "Scale" - bottom: "layer_128_1_conv1_h" - top: "layer_128_1_conv1_h" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - param { - lr_mult: 2.0 - decay_mult: 1.0 - } - scale_param { - bias_term: true - } -} -layer { - name: "layer_128_1_relu2" - type: "ReLU" - bottom: "layer_128_1_conv1_h" - top: "layer_128_1_conv1_h" -} -layer { - name: "layer_128_1_conv2" - type: "Convolution" - bottom: "layer_128_1_conv1_h" - top: "layer_128_1_conv2" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - convolution_param { - num_output: 128 - bias_term: false - pad: 1 - kernel_size: 3 - stride: 1 - weight_filler { - type: "msra" - } - bias_filler { - type: "constant" - value: 0.0 - } - } -} -layer { - name: "layer_128_1_conv_expand_h" - type: "Convolution" - bottom: "layer_128_1_bn1_h" - top: "layer_128_1_conv_expand_h" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - convolution_param { - num_output: 128 - bias_term: false - pad: 0 - kernel_size: 1 - stride: 2 - weight_filler { - type: "msra" - } - bias_filler { - type: "constant" - value: 0.0 - } - } -} -layer { - name: "layer_128_1_sum" - type: "Eltwise" - bottom: "layer_128_1_conv2" - bottom: "layer_128_1_conv_expand_h" - top: "layer_128_1_sum" -} -layer { - name: "layer_256_1_bn1" - type: "BatchNorm" - bottom: "layer_128_1_sum" - top: "layer_256_1_bn1" - param { - lr_mult: 0.0 - } - param { - lr_mult: 0.0 - } - param { - lr_mult: 0.0 - } -} -layer { - name: "layer_256_1_scale1" - type: "Scale" - bottom: "layer_256_1_bn1" - top: "layer_256_1_bn1" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - param { - lr_mult: 2.0 - decay_mult: 1.0 - } - scale_param { - bias_term: true - } -} -layer { - name: "layer_256_1_relu1" - type: "ReLU" - bottom: "layer_256_1_bn1" - top: "layer_256_1_bn1" -} -layer { - name: "layer_256_1_conv1" - type: "Convolution" - bottom: "layer_256_1_bn1" - top: "layer_256_1_conv1" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - convolution_param { - num_output: 256 - bias_term: false - pad: 1 - kernel_size: 3 - stride: 2 - weight_filler { - type: "msra" - } - bias_filler { - type: "constant" - value: 0.0 - } - } -} -layer { - name: "layer_256_1_bn2" - type: "BatchNorm" - bottom: "layer_256_1_conv1" - top: "layer_256_1_conv1" - param { - lr_mult: 0.0 - } - param { - lr_mult: 
0.0 - } - param { - lr_mult: 0.0 - } -} -layer { - name: "layer_256_1_scale2" - type: "Scale" - bottom: "layer_256_1_conv1" - top: "layer_256_1_conv1" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - param { - lr_mult: 2.0 - decay_mult: 1.0 - } - scale_param { - bias_term: true - } -} -layer { - name: "layer_256_1_relu2" - type: "ReLU" - bottom: "layer_256_1_conv1" - top: "layer_256_1_conv1" -} -layer { - name: "layer_256_1_conv2" - type: "Convolution" - bottom: "layer_256_1_conv1" - top: "layer_256_1_conv2" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - convolution_param { - num_output: 256 - bias_term: false - pad: 1 - kernel_size: 3 - stride: 1 - weight_filler { - type: "msra" - } - bias_filler { - type: "constant" - value: 0.0 - } - } -} -layer { - name: "layer_256_1_conv_expand" - type: "Convolution" - bottom: "layer_256_1_bn1" - top: "layer_256_1_conv_expand" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - convolution_param { - num_output: 256 - bias_term: false - pad: 0 - kernel_size: 1 - stride: 2 - weight_filler { - type: "msra" - } - bias_filler { - type: "constant" - value: 0.0 - } - } -} -layer { - name: "layer_256_1_sum" - type: "Eltwise" - bottom: "layer_256_1_conv2" - bottom: "layer_256_1_conv_expand" - top: "layer_256_1_sum" -} -layer { - name: "layer_512_1_bn1" - type: "BatchNorm" - bottom: "layer_256_1_sum" - top: "layer_512_1_bn1" - param { - lr_mult: 0.0 - } - param { - lr_mult: 0.0 - } - param { - lr_mult: 0.0 - } -} -layer { - name: "layer_512_1_scale1" - type: "Scale" - bottom: "layer_512_1_bn1" - top: "layer_512_1_bn1" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - param { - lr_mult: 2.0 - decay_mult: 1.0 - } - scale_param { - bias_term: true - } -} -layer { - name: "layer_512_1_relu1" - type: "ReLU" - bottom: "layer_512_1_bn1" - top: "layer_512_1_bn1" -} -layer { - name: "layer_512_1_conv1_h" - type: "Convolution" - bottom: "layer_512_1_bn1" - top: "layer_512_1_conv1_h" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - convolution_param { - num_output: 128 - bias_term: false - pad: 1 - kernel_size: 3 - stride: 1 # 2 - weight_filler { - type: "msra" - } - bias_filler { - type: "constant" - value: 0.0 - } - } -} -layer { - name: "layer_512_1_bn2_h" - type: "BatchNorm" - bottom: "layer_512_1_conv1_h" - top: "layer_512_1_conv1_h" - param { - lr_mult: 0.0 - } - param { - lr_mult: 0.0 - } - param { - lr_mult: 0.0 - } -} -layer { - name: "layer_512_1_scale2_h" - type: "Scale" - bottom: "layer_512_1_conv1_h" - top: "layer_512_1_conv1_h" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - param { - lr_mult: 2.0 - decay_mult: 1.0 - } - scale_param { - bias_term: true - } -} -layer { - name: "layer_512_1_relu2" - type: "ReLU" - bottom: "layer_512_1_conv1_h" - top: "layer_512_1_conv1_h" -} -layer { - name: "layer_512_1_conv2_h" - type: "Convolution" - bottom: "layer_512_1_conv1_h" - top: "layer_512_1_conv2_h" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - convolution_param { - num_output: 256 - bias_term: false - pad: 2 # 1 - kernel_size: 3 - stride: 1 - dilation: 2 - weight_filler { - type: "msra" - } - bias_filler { - type: "constant" - value: 0.0 - } - } -} -layer { - name: "layer_512_1_conv_expand_h" - type: "Convolution" - bottom: "layer_512_1_bn1" - top: "layer_512_1_conv_expand_h" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - convolution_param { - num_output: 256 - bias_term: false - pad: 0 - kernel_size: 1 - stride: 1 # 2 - weight_filler { - type: "msra" - } - bias_filler { - type: "constant" - value: 0.0 - } - } -} -layer { - name: "layer_512_1_sum" - type: 
"Eltwise" - bottom: "layer_512_1_conv2_h" - bottom: "layer_512_1_conv_expand_h" - top: "layer_512_1_sum" -} -layer { - name: "last_bn_h" - type: "BatchNorm" - bottom: "layer_512_1_sum" - top: "layer_512_1_sum" - param { - lr_mult: 0.0 - } - param { - lr_mult: 0.0 - } - param { - lr_mult: 0.0 - } -} -layer { - name: "last_scale_h" - type: "Scale" - bottom: "layer_512_1_sum" - top: "layer_512_1_sum" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - param { - lr_mult: 2.0 - decay_mult: 1.0 - } - scale_param { - bias_term: true - } -} -layer { - name: "last_relu" - type: "ReLU" - bottom: "layer_512_1_sum" - top: "fc7" -} - -layer { - name: "conv6_1_h" - type: "Convolution" - bottom: "fc7" - top: "conv6_1_h" - param { - lr_mult: 1 - decay_mult: 1 - } - param { - lr_mult: 2 - decay_mult: 0 - } - convolution_param { - num_output: 128 - pad: 0 - kernel_size: 1 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - value: 0 - } - } -} -layer { - name: "conv6_1_relu" - type: "ReLU" - bottom: "conv6_1_h" - top: "conv6_1_h" -} -layer { - name: "conv6_2_h" - type: "Convolution" - bottom: "conv6_1_h" - top: "conv6_2_h" - param { - lr_mult: 1 - decay_mult: 1 - } - param { - lr_mult: 2 - decay_mult: 0 - } - convolution_param { - num_output: 256 - pad: 1 - kernel_size: 3 - stride: 2 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - value: 0 - } - } -} -layer { - name: "conv6_2_relu" - type: "ReLU" - bottom: "conv6_2_h" - top: "conv6_2_h" -} -layer { - name: "conv7_1_h" - type: "Convolution" - bottom: "conv6_2_h" - top: "conv7_1_h" - param { - lr_mult: 1 - decay_mult: 1 - } - param { - lr_mult: 2 - decay_mult: 0 - } - convolution_param { - num_output: 64 - pad: 0 - kernel_size: 1 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - value: 0 - } - } -} -layer { - name: "conv7_1_relu" - type: "ReLU" - bottom: "conv7_1_h" - top: "conv7_1_h" -} -layer { - name: "conv7_2_h" - type: "Convolution" - bottom: "conv7_1_h" - top: "conv7_2_h" - param { - lr_mult: 1 - decay_mult: 1 - } - param { - lr_mult: 2 - decay_mult: 0 - } - convolution_param { - num_output: 128 - pad: 1 - kernel_size: 3 - stride: 2 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - value: 0 - } - } -} -layer { - name: "conv7_2_relu" - type: "ReLU" - bottom: "conv7_2_h" - top: "conv7_2_h" -} -layer { - name: "conv8_1_h" - type: "Convolution" - bottom: "conv7_2_h" - top: "conv8_1_h" - param { - lr_mult: 1 - decay_mult: 1 - } - param { - lr_mult: 2 - decay_mult: 0 - } - convolution_param { - num_output: 64 - pad: 0 - kernel_size: 1 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - value: 0 - } - } -} -layer { - name: "conv8_1_relu" - type: "ReLU" - bottom: "conv8_1_h" - top: "conv8_1_h" -} -layer { - name: "conv8_2_h" - type: "Convolution" - bottom: "conv8_1_h" - top: "conv8_2_h" - param { - lr_mult: 1 - decay_mult: 1 - } - param { - lr_mult: 2 - decay_mult: 0 - } - convolution_param { - num_output: 128 - pad: 1 - kernel_size: 3 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - value: 0 - } - } -} -layer { - name: "conv8_2_relu" - type: "ReLU" - bottom: "conv8_2_h" - top: "conv8_2_h" -} -layer { - name: "conv9_1_h" - type: "Convolution" - bottom: "conv8_2_h" - top: "conv9_1_h" - param { - lr_mult: 1 - decay_mult: 1 - } - param { - lr_mult: 2 - decay_mult: 0 - } - convolution_param { - num_output: 64 - pad: 0 - kernel_size: 1 - stride: 1 - weight_filler { - 
type: "xavier" - } - bias_filler { - type: "constant" - value: 0 - } - } -} -layer { - name: "conv9_1_relu" - type: "ReLU" - bottom: "conv9_1_h" - top: "conv9_1_h" -} -layer { - name: "conv9_2_h" - type: "Convolution" - bottom: "conv9_1_h" - top: "conv9_2_h" - param { - lr_mult: 1 - decay_mult: 1 - } - param { - lr_mult: 2 - decay_mult: 0 - } - convolution_param { - num_output: 128 - pad: 1 - kernel_size: 3 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - value: 0 - } - } -} -layer { - name: "conv9_2_relu" - type: "ReLU" - bottom: "conv9_2_h" - top: "conv9_2_h" -} -layer { - name: "conv4_3_norm" - type: "Normalize" - bottom: "layer_256_1_bn1" - top: "conv4_3_norm" - norm_param { - across_spatial: false - scale_filler { - type: "constant" - value: 20 - } - channel_shared: false - } -} -layer { - name: "conv4_3_norm_mbox_loc" - type: "Convolution" - bottom: "conv4_3_norm" - top: "conv4_3_norm_mbox_loc" - param { - lr_mult: 1 - decay_mult: 1 - } - param { - lr_mult: 2 - decay_mult: 0 - } - convolution_param { - num_output: 16 - pad: 1 - kernel_size: 3 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - value: 0 - } - } -} -layer { - name: "conv4_3_norm_mbox_loc_perm" - type: "Permute" - bottom: "conv4_3_norm_mbox_loc" - top: "conv4_3_norm_mbox_loc_perm" - permute_param { - order: 0 - order: 2 - order: 3 - order: 1 - } -} -layer { - name: "conv4_3_norm_mbox_loc_flat" - type: "Flatten" - bottom: "conv4_3_norm_mbox_loc_perm" - top: "conv4_3_norm_mbox_loc_flat" - flatten_param { - axis: 1 - } -} -layer { - name: "conv4_3_norm_mbox_conf" - type: "Convolution" - bottom: "conv4_3_norm" - top: "conv4_3_norm_mbox_conf" - param { - lr_mult: 1 - decay_mult: 1 - } - param { - lr_mult: 2 - decay_mult: 0 - } - convolution_param { - num_output: 8 # 84 - pad: 1 - kernel_size: 3 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - value: 0 - } - } -} -layer { - name: "conv4_3_norm_mbox_conf_perm" - type: "Permute" - bottom: "conv4_3_norm_mbox_conf" - top: "conv4_3_norm_mbox_conf_perm" - permute_param { - order: 0 - order: 2 - order: 3 - order: 1 - } -} -layer { - name: "conv4_3_norm_mbox_conf_flat" - type: "Flatten" - bottom: "conv4_3_norm_mbox_conf_perm" - top: "conv4_3_norm_mbox_conf_flat" - flatten_param { - axis: 1 - } -} -layer { - name: "conv4_3_norm_mbox_priorbox" - type: "PriorBox" - bottom: "conv4_3_norm" - bottom: "data" - top: "conv4_3_norm_mbox_priorbox" - prior_box_param { - min_size: 30.0 - max_size: 60.0 - aspect_ratio: 2 - flip: true - clip: false - variance: 0.1 - variance: 0.1 - variance: 0.2 - variance: 0.2 - step: 8 - offset: 0.5 - } -} -layer { - name: "fc7_mbox_loc" - type: "Convolution" - bottom: "fc7" - top: "fc7_mbox_loc" - param { - lr_mult: 1 - decay_mult: 1 - } - param { - lr_mult: 2 - decay_mult: 0 - } - convolution_param { - num_output: 24 - pad: 1 - kernel_size: 3 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - value: 0 - } - } -} -layer { - name: "fc7_mbox_loc_perm" - type: "Permute" - bottom: "fc7_mbox_loc" - top: "fc7_mbox_loc_perm" - permute_param { - order: 0 - order: 2 - order: 3 - order: 1 - } -} -layer { - name: "fc7_mbox_loc_flat" - type: "Flatten" - bottom: "fc7_mbox_loc_perm" - top: "fc7_mbox_loc_flat" - flatten_param { - axis: 1 - } -} -layer { - name: "fc7_mbox_conf" - type: "Convolution" - bottom: "fc7" - top: "fc7_mbox_conf" - param { - lr_mult: 1 - decay_mult: 1 - } - param { - lr_mult: 2 - decay_mult: 0 - } - 
convolution_param { - num_output: 12 # 126 - pad: 1 - kernel_size: 3 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - value: 0 - } - } -} -layer { - name: "fc7_mbox_conf_perm" - type: "Permute" - bottom: "fc7_mbox_conf" - top: "fc7_mbox_conf_perm" - permute_param { - order: 0 - order: 2 - order: 3 - order: 1 - } -} -layer { - name: "fc7_mbox_conf_flat" - type: "Flatten" - bottom: "fc7_mbox_conf_perm" - top: "fc7_mbox_conf_flat" - flatten_param { - axis: 1 - } -} -layer { - name: "fc7_mbox_priorbox" - type: "PriorBox" - bottom: "fc7" - bottom: "data" - top: "fc7_mbox_priorbox" - prior_box_param { - min_size: 60.0 - max_size: 111.0 - aspect_ratio: 2 - aspect_ratio: 3 - flip: true - clip: false - variance: 0.1 - variance: 0.1 - variance: 0.2 - variance: 0.2 - step: 16 - offset: 0.5 - } -} -layer { - name: "conv6_2_mbox_loc" - type: "Convolution" - bottom: "conv6_2_h" - top: "conv6_2_mbox_loc" - param { - lr_mult: 1 - decay_mult: 1 - } - param { - lr_mult: 2 - decay_mult: 0 - } - convolution_param { - num_output: 24 - pad: 1 - kernel_size: 3 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - value: 0 - } - } -} -layer { - name: "conv6_2_mbox_loc_perm" - type: "Permute" - bottom: "conv6_2_mbox_loc" - top: "conv6_2_mbox_loc_perm" - permute_param { - order: 0 - order: 2 - order: 3 - order: 1 - } -} -layer { - name: "conv6_2_mbox_loc_flat" - type: "Flatten" - bottom: "conv6_2_mbox_loc_perm" - top: "conv6_2_mbox_loc_flat" - flatten_param { - axis: 1 - } -} -layer { - name: "conv6_2_mbox_conf" - type: "Convolution" - bottom: "conv6_2_h" - top: "conv6_2_mbox_conf" - param { - lr_mult: 1 - decay_mult: 1 - } - param { - lr_mult: 2 - decay_mult: 0 - } - convolution_param { - num_output: 12 # 126 - pad: 1 - kernel_size: 3 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - value: 0 - } - } -} -layer { - name: "conv6_2_mbox_conf_perm" - type: "Permute" - bottom: "conv6_2_mbox_conf" - top: "conv6_2_mbox_conf_perm" - permute_param { - order: 0 - order: 2 - order: 3 - order: 1 - } -} -layer { - name: "conv6_2_mbox_conf_flat" - type: "Flatten" - bottom: "conv6_2_mbox_conf_perm" - top: "conv6_2_mbox_conf_flat" - flatten_param { - axis: 1 - } -} -layer { - name: "conv6_2_mbox_priorbox" - type: "PriorBox" - bottom: "conv6_2_h" - bottom: "data" - top: "conv6_2_mbox_priorbox" - prior_box_param { - min_size: 111.0 - max_size: 162.0 - aspect_ratio: 2 - aspect_ratio: 3 - flip: true - clip: false - variance: 0.1 - variance: 0.1 - variance: 0.2 - variance: 0.2 - step: 32 - offset: 0.5 - } -} -layer { - name: "conv7_2_mbox_loc" - type: "Convolution" - bottom: "conv7_2_h" - top: "conv7_2_mbox_loc" - param { - lr_mult: 1 - decay_mult: 1 - } - param { - lr_mult: 2 - decay_mult: 0 - } - convolution_param { - num_output: 24 - pad: 1 - kernel_size: 3 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - value: 0 - } - } -} -layer { - name: "conv7_2_mbox_loc_perm" - type: "Permute" - bottom: "conv7_2_mbox_loc" - top: "conv7_2_mbox_loc_perm" - permute_param { - order: 0 - order: 2 - order: 3 - order: 1 - } -} -layer { - name: "conv7_2_mbox_loc_flat" - type: "Flatten" - bottom: "conv7_2_mbox_loc_perm" - top: "conv7_2_mbox_loc_flat" - flatten_param { - axis: 1 - } -} -layer { - name: "conv7_2_mbox_conf" - type: "Convolution" - bottom: "conv7_2_h" - top: "conv7_2_mbox_conf" - param { - lr_mult: 1 - decay_mult: 1 - } - param { - lr_mult: 2 - decay_mult: 0 - } - convolution_param { - 
num_output: 12 # 126 - pad: 1 - kernel_size: 3 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - value: 0 - } - } -} -layer { - name: "conv7_2_mbox_conf_perm" - type: "Permute" - bottom: "conv7_2_mbox_conf" - top: "conv7_2_mbox_conf_perm" - permute_param { - order: 0 - order: 2 - order: 3 - order: 1 - } -} -layer { - name: "conv7_2_mbox_conf_flat" - type: "Flatten" - bottom: "conv7_2_mbox_conf_perm" - top: "conv7_2_mbox_conf_flat" - flatten_param { - axis: 1 - } -} -layer { - name: "conv7_2_mbox_priorbox" - type: "PriorBox" - bottom: "conv7_2_h" - bottom: "data" - top: "conv7_2_mbox_priorbox" - prior_box_param { - min_size: 162.0 - max_size: 213.0 - aspect_ratio: 2 - aspect_ratio: 3 - flip: true - clip: false - variance: 0.1 - variance: 0.1 - variance: 0.2 - variance: 0.2 - step: 64 - offset: 0.5 - } -} -layer { - name: "conv8_2_mbox_loc" - type: "Convolution" - bottom: "conv8_2_h" - top: "conv8_2_mbox_loc" - param { - lr_mult: 1 - decay_mult: 1 - } - param { - lr_mult: 2 - decay_mult: 0 - } - convolution_param { - num_output: 16 - pad: 1 - kernel_size: 3 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - value: 0 - } - } -} -layer { - name: "conv8_2_mbox_loc_perm" - type: "Permute" - bottom: "conv8_2_mbox_loc" - top: "conv8_2_mbox_loc_perm" - permute_param { - order: 0 - order: 2 - order: 3 - order: 1 - } -} -layer { - name: "conv8_2_mbox_loc_flat" - type: "Flatten" - bottom: "conv8_2_mbox_loc_perm" - top: "conv8_2_mbox_loc_flat" - flatten_param { - axis: 1 - } -} -layer { - name: "conv8_2_mbox_conf" - type: "Convolution" - bottom: "conv8_2_h" - top: "conv8_2_mbox_conf" - param { - lr_mult: 1 - decay_mult: 1 - } - param { - lr_mult: 2 - decay_mult: 0 - } - convolution_param { - num_output: 8 # 84 - pad: 1 - kernel_size: 3 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - value: 0 - } - } -} -layer { - name: "conv8_2_mbox_conf_perm" - type: "Permute" - bottom: "conv8_2_mbox_conf" - top: "conv8_2_mbox_conf_perm" - permute_param { - order: 0 - order: 2 - order: 3 - order: 1 - } -} -layer { - name: "conv8_2_mbox_conf_flat" - type: "Flatten" - bottom: "conv8_2_mbox_conf_perm" - top: "conv8_2_mbox_conf_flat" - flatten_param { - axis: 1 - } -} -layer { - name: "conv8_2_mbox_priorbox" - type: "PriorBox" - bottom: "conv8_2_h" - bottom: "data" - top: "conv8_2_mbox_priorbox" - prior_box_param { - min_size: 213.0 - max_size: 264.0 - aspect_ratio: 2 - flip: true - clip: false - variance: 0.1 - variance: 0.1 - variance: 0.2 - variance: 0.2 - step: 100 - offset: 0.5 - } -} -layer { - name: "conv9_2_mbox_loc" - type: "Convolution" - bottom: "conv9_2_h" - top: "conv9_2_mbox_loc" - param { - lr_mult: 1 - decay_mult: 1 - } - param { - lr_mult: 2 - decay_mult: 0 - } - convolution_param { - num_output: 16 - pad: 1 - kernel_size: 3 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - value: 0 - } - } -} -layer { - name: "conv9_2_mbox_loc_perm" - type: "Permute" - bottom: "conv9_2_mbox_loc" - top: "conv9_2_mbox_loc_perm" - permute_param { - order: 0 - order: 2 - order: 3 - order: 1 - } -} -layer { - name: "conv9_2_mbox_loc_flat" - type: "Flatten" - bottom: "conv9_2_mbox_loc_perm" - top: "conv9_2_mbox_loc_flat" - flatten_param { - axis: 1 - } -} -layer { - name: "conv9_2_mbox_conf" - type: "Convolution" - bottom: "conv9_2_h" - top: "conv9_2_mbox_conf" - param { - lr_mult: 1 - decay_mult: 1 - } - param { - lr_mult: 2 - decay_mult: 0 - } - convolution_param { - 
num_output: 8 # 84 - pad: 1 - kernel_size: 3 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - value: 0 - } - } -} -layer { - name: "conv9_2_mbox_conf_perm" - type: "Permute" - bottom: "conv9_2_mbox_conf" - top: "conv9_2_mbox_conf_perm" - permute_param { - order: 0 - order: 2 - order: 3 - order: 1 - } -} -layer { - name: "conv9_2_mbox_conf_flat" - type: "Flatten" - bottom: "conv9_2_mbox_conf_perm" - top: "conv9_2_mbox_conf_flat" - flatten_param { - axis: 1 - } -} -layer { - name: "conv9_2_mbox_priorbox" - type: "PriorBox" - bottom: "conv9_2_h" - bottom: "data" - top: "conv9_2_mbox_priorbox" - prior_box_param { - min_size: 264.0 - max_size: 315.0 - aspect_ratio: 2 - flip: true - clip: false - variance: 0.1 - variance: 0.1 - variance: 0.2 - variance: 0.2 - step: 300 - offset: 0.5 - } -} -layer { - name: "mbox_loc" - type: "Concat" - bottom: "conv4_3_norm_mbox_loc_flat" - bottom: "fc7_mbox_loc_flat" - bottom: "conv6_2_mbox_loc_flat" - bottom: "conv7_2_mbox_loc_flat" - bottom: "conv8_2_mbox_loc_flat" - bottom: "conv9_2_mbox_loc_flat" - top: "mbox_loc" - concat_param { - axis: 1 - } -} -layer { - name: "mbox_conf" - type: "Concat" - bottom: "conv4_3_norm_mbox_conf_flat" - bottom: "fc7_mbox_conf_flat" - bottom: "conv6_2_mbox_conf_flat" - bottom: "conv7_2_mbox_conf_flat" - bottom: "conv8_2_mbox_conf_flat" - bottom: "conv9_2_mbox_conf_flat" - top: "mbox_conf" - concat_param { - axis: 1 - } -} -layer { - name: "mbox_priorbox" - type: "Concat" - bottom: "conv4_3_norm_mbox_priorbox" - bottom: "fc7_mbox_priorbox" - bottom: "conv6_2_mbox_priorbox" - bottom: "conv7_2_mbox_priorbox" - bottom: "conv8_2_mbox_priorbox" - bottom: "conv9_2_mbox_priorbox" - top: "mbox_priorbox" - concat_param { - axis: 2 - } -} - -layer { - name: "mbox_conf_reshape" - type: "Reshape" - bottom: "mbox_conf" - top: "mbox_conf_reshape" - reshape_param { - shape { - dim: 0 - dim: -1 - dim: 2 - } - } -} -layer { - name: "mbox_conf_softmax" - type: "Softmax" - bottom: "mbox_conf_reshape" - top: "mbox_conf_softmax" - softmax_param { - axis: 2 - } -} -layer { - name: "mbox_conf_flatten" - type: "Flatten" - bottom: "mbox_conf_softmax" - top: "mbox_conf_flatten" - flatten_param { - axis: 1 - } -} - -layer { - name: "detection_out" - type: "DetectionOutput" - bottom: "mbox_loc" - bottom: "mbox_conf_flatten" - bottom: "mbox_priorbox" - top: "detection_out" - include { - phase: TEST - } - detection_output_param { - num_classes: 2 - share_location: true - background_label_id: 0 - nms_param { - nms_threshold: 0.45 - top_k: 400 - } - code_type: CENTER_SIZE - keep_top_k: 200 - confidence_threshold: 0.01 - } -} \ No newline at end of file diff --git a/legacy/face_detection/caffe_model/res10_300x300_ssd_iter_140000.caffemodel b/legacy/face_detection/caffe_model/res10_300x300_ssd_iter_140000.caffemodel deleted file mode 100644 index 809dfd787..000000000 Binary files a/legacy/face_detection/caffe_model/res10_300x300_ssd_iter_140000.caffemodel and /dev/null differ diff --git a/legacy/face_detection/nn4/nn4.small2.v1.t7 b/legacy/face_detection/nn4/nn4.small2.v1.t7 deleted file mode 100644 index 8a0d01975..000000000 Binary files a/legacy/face_detection/nn4/nn4.small2.v1.t7 and /dev/null differ diff --git a/legacy/face_detection/setup.py b/legacy/face_detection/setup.py deleted file mode 100644 index 2ff4f999c..000000000 --- a/legacy/face_detection/setup.py +++ /dev/null @@ -1,11 +0,0 @@ -#!/usr/bin/env python3 - -from distutils.core import setup -from catkin_pkg.python_setup import generate_distutils_setup - 
-setup_args = generate_distutils_setup( - packages=['face_detection'], - package_dir={'': 'src'} -) - -setup(**setup_args) \ No newline at end of file diff --git a/legacy/face_detection/src/face_detection/face_detection_server.py b/legacy/face_detection/src/face_detection/face_detection_server.py deleted file mode 100755 index 3b9ea318b..000000000 --- a/legacy/face_detection/src/face_detection/face_detection_server.py +++ /dev/null @@ -1,168 +0,0 @@ -#!/usr/bin/env python3 -from genericpath import exists -import rospy -import rospkg - -from face_detection.srv import FaceDetection, FaceDetectionResponse, \ - FaceDetectionRequest -from lasr_vision_msgs.msg import Detection -import os -import numpy as np -import cv2 -import pickle -import imutils -from cv_bridge3 import CvBridge - -NET = os.path.join(rospkg.RosPack().get_path('face_detection'),'nn4', "nn4.small2.v1.t7") -PROTO_PATH = os.path.join(rospkg.RosPack().get_path('face_detection'), 'caffe_model', "deploy.prototxt") -MODEL_PATH = os.path.join(rospkg.RosPack().get_path('face_detection'), 'caffe_model', - "res10_300x300_ssd_iter_140000.caffemodel") -OUTPUT_PATH = os.path.join(rospkg.RosPack().get_path('face_detection'), "output") - -CONFIDENCE_THRESHOLD = 0.5 - -# -# def delete_model(): -# if os.path.exists(os.path.join(OUTPUT_PATH, "recognizer.pickle")): -# os.path.remove(os.path.join(OUTPUT_PATH, "recognizer.pickle")) -# if os.path.exists(os.path.join(OUTPUT_PATH, "le.pickle")): -# os.path.remove(os.path.join(OUTPUT_PATH, "le.pickle")) - - -class FaceDetectionServer: - """ - A Server for performing face detection and classification with OpenCV. - """ - - def __init__(self): - - # Load network and models. - self.detector = cv2.dnn.readNetFromCaffe(PROTO_PATH, MODEL_PATH) - self.embedder = cv2.dnn.readNetFromTorch(NET) - - # Bridge for conversion between cv2 and sensor_msgs/Image, and vice versa. - self.bridge = CvBridge() - # delete_model() - self.recognizer = None - self.le = None - - def load_model(self): - if not os.path.exists(os.path.join(OUTPUT_PATH, "recognizer.pickle")): - self.recognizer = None - else: - with open(os.path.join(OUTPUT_PATH, "recognizer.pickle"), "rb") as fp: - self.recognizer = pickle.loads(fp.read()) - if not os.path.exists(os.path.join(OUTPUT_PATH, "le.pickle")): - self.le = None - else: - with open(os.path.join(OUTPUT_PATH, "le.pickle"), "rb") as fp: - self.le = pickle.loads(fp.read()) - print(self.le.classes_) - - if self.recognizer and self.le: - return True - - def __call__(self, req): - """ - Core method of server. - """ - - if not self.recognizer or not self.le: - if not self.load_model(): - raise rospy.ServiceException("No model to load from.") - - # Construct empty response. - if isinstance(req, FaceDetectionRequest): - response = FaceDetectionResponse() - - # Convert sensor_msgs/Image to cv2 image. - print('i am here') - cv_image = self.bridge.imgmsg_to_cv2_np(req.image_raw) - - # else: - # response = FaceDetectionPCLResponse() - # - # # Extract rgb image from pointcloud - # cv_image = np.fromstring(req.cloud.data, dtype=np.uint8) - # cv_image = cv_image.reshape(req.cloud.height, req.cloud.width, 32) - # cv_image = cv_image[:, :, 16:19] - # - # # Ensure array is contiguous - # cv_image = np.ascontiguousarray(cv_image, dtype=np.uint8) - - # Resize for input to the network. - cv_image = imutils.resize(cv_image, width=600) - h, w = cv_image.shape[:2] - - # Construct a blob from the image. 
- blob = cv2.dnn.blobFromImage( - cv2.resize(cv_image, (300, 300)), - 1.0, (300, 300), - (104.0, 177.0, 123.0), - swapRB=False, crop=False - ) - - # Apply OpenCV's deep learning-based face detector to localize - # faces in the input image - self.detector.setInput(blob) - detections = self.detector.forward() - - print(detections, 'the face detections') - - # Iterate detections - for detection in detections[0][0]: - - # Extract confidence. - confidence = detection[2] - - # Ensure confidence of detection is above specified threshold. - if confidence > CONFIDENCE_THRESHOLD: - - # Compute bounding box coordinates. - face_bb = detection[3:7] * np.array([w, h, w, h]) - x1, y1, x2, y2 = face_bb.astype("int") - - # Extract face. - face = cv_image[y1:y2, x1:x2] - - face_width, face_height = face.shape[:2] - - # Ensure face width and height are sufficiently large. - if face_width < 20 or face_height < 20: continue - - # Construct a blob from the face. - faceBlob = cv2.dnn.blobFromImage( - face, 1.0 / 255, (96, 96), - (0, 0, 0), - swapRB=True, crop=False - ) - - # Pass blob through face embedding model - # to obtain the 128-d quantification of the face. - self.embedder.setInput(faceBlob) - vec = self.embedder.forward() - - # Perform classification to recognise the face. - predictions = self.recognizer.predict_proba(vec)[0] - j = np.argmax(predictions) - prob = predictions[j] - name = self.le.classes_[j] - print(name, prob, 'hello there') - - # Append the detection to the response. - if isinstance(req, FaceDetectionRequest): - response.detected_objects.append(Detection(name, prob, [x1, y1, x2, y2])) - # else: - # centroid = bb_to_centroid(req.cloud, x1, y1, x2 - x1, y2 - y1) - # response.detections.append(DetectionPCL(name, prob, [x1, y1, x2, y2], centroid)) - # print('the resp is ', response) - return response - - -if __name__ == "__main__": - rospy.init_node("face_detection_server") - server = FaceDetectionServer() - service = rospy.Service('face_detection_server', FaceDetection, server) - # service_pcl = rospy.Service('face_detection_pcl', FaceDetectionPCL, server) - rospy.loginfo("Face Detection Service initialised") - rospy.spin() \ No newline at end of file diff --git a/legacy/face_detection/srv/FaceDetection.srv b/legacy/face_detection/srv/FaceDetection.srv deleted file mode 100755 index 52b8d5ce6..000000000 --- a/legacy/face_detection/srv/FaceDetection.srv +++ /dev/null @@ -1,3 +0,0 @@ -sensor_msgs/Image image_raw ---- -lasr_vision_msgs/Detection[] detected_objects diff --git a/legacy/face_detection/srv/FaceDetectionPCL.srv b/legacy/face_detection/srv/FaceDetectionPCL.srv deleted file mode 100644 index 4a3b5dde2..000000000 --- a/legacy/face_detection/srv/FaceDetectionPCL.srv +++ /dev/null @@ -1,3 +0,0 @@ -sensor_msgs/PointCloud2 cloud ---- -face_detection/DetectionPCL[] detections \ No newline at end of file diff --git a/legacy/find_person_and_ask_open_door/CMakeLists.txt b/legacy/find_person_and_ask_open_door/CMakeLists.txt deleted file mode 100644 index c42d8426d..000000000 --- a/legacy/find_person_and_ask_open_door/CMakeLists.txt +++ /dev/null @@ -1,204 +0,0 @@ -cmake_minimum_required(VERSION 3.0.2) -project(find_person_and_ask_open_door) - -## Compile as C++11, supported in ROS Kinetic and newer -# add_compile_options(-std=c++11) - -## Find catkin macros and libraries -## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz) -## is used, also find other catkin packages -find_package(catkin REQUIRED COMPONENTS - rospy -) - -## System dependencies are found with CMake's 
conventions -# find_package(Boost REQUIRED COMPONENTS system) - - -## Uncomment this if the package has a setup.py. This macro ensures -## modules and global scripts declared therein get installed -## See http://ros.org/doc/api/catkin/html/user_guide/setup_dot_py.html -catkin_python_setup() - -################################################ -## Declare ROS messages, services and actions ## -################################################ - -## To declare and build messages, services or actions from within this -## package, follow these steps: -## * Let MSG_DEP_SET be the set of packages whose message types you use in -## your messages/services/actions (e.g. std_msgs, actionlib_msgs, ...). -## * In the file package.xml: -## * add a build_depend tag for "message_generation" -## * add a build_depend and a exec_depend tag for each package in MSG_DEP_SET -## * If MSG_DEP_SET isn't empty the following dependency has been pulled in -## but can be declared for certainty nonetheless: -## * add a exec_depend tag for "message_runtime" -## * In this file (CMakeLists.txt): -## * add "message_generation" and every package in MSG_DEP_SET to -## find_package(catkin REQUIRED COMPONENTS ...) -## * add "message_runtime" and every package in MSG_DEP_SET to -## catkin_package(CATKIN_DEPENDS ...) -## * uncomment the add_*_files sections below as needed -## and list every .msg/.srv/.action file to be processed -## * uncomment the generate_messages entry below -## * add every package in MSG_DEP_SET to generate_messages(DEPENDENCIES ...) - -## Generate messages in the 'msg' folder -# add_message_files( -# FILES -# Message1.msg -# Message2.msg -# ) - -## Generate services in the 'srv' folder -# add_service_files( -# FILES -# Service1.srv -# Service2.srv -# ) - -## Generate actions in the 'action' folder -# add_action_files( -# FILES -# Action1.action -# Action2.action -# ) - -## Generate added messages and services with any dependencies listed here -# generate_messages( -# DEPENDENCIES -# std_msgs # Or other packages containing msgs -# ) - -################################################ -## Declare ROS dynamic reconfigure parameters ## -################################################ - -## To declare and build dynamic reconfigure parameters within this -## package, follow these steps: -## * In the file package.xml: -## * add a build_depend and a exec_depend tag for "dynamic_reconfigure" -## * In this file (CMakeLists.txt): -## * add "dynamic_reconfigure" to -## find_package(catkin REQUIRED COMPONENTS ...) 
-## * uncomment the "generate_dynamic_reconfigure_options" section below -## and list every .cfg file to be processed - -## Generate dynamic reconfigure parameters in the 'cfg' folder -# generate_dynamic_reconfigure_options( -# cfg/DynReconf1.cfg -# cfg/DynReconf2.cfg -# ) - -################################### -## catkin specific configuration ## -################################### -## The catkin_package macro generates cmake config files for your package -## Declare things to be passed to dependent projects -## INCLUDE_DIRS: uncomment this if your package contains header files -## LIBRARIES: libraries you create in this project that dependent projects also need -## CATKIN_DEPENDS: catkin_packages dependent projects also need -## DEPENDS: system dependencies of this project that dependent projects also need -catkin_package( -# INCLUDE_DIRS include -# LIBRARIES find_person_and_ask_open_door -# CATKIN_DEPENDS rospy -# DEPENDS system_lib -) - -########### -## Build ## -########### - -## Specify additional locations of header files -## Your package locations should be listed before other locations -include_directories( -# include - ${catkin_INCLUDE_DIRS} -) - -## Declare a C++ library -# add_library(${PROJECT_NAME} -# src/${PROJECT_NAME}/find_person_and_ask_open_door.cpp -# ) - -## Add cmake target dependencies of the library -## as an example, code may need to be generated before libraries -## either from message generation or dynamic reconfigure -# add_dependencies(${PROJECT_NAME} ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS}) - -## Declare a C++ executable -## With catkin_make all packages are built within a single CMake context -## The recommended prefix ensures that target names across packages don't collide -# add_executable(${PROJECT_NAME}_node src/find_person_and_ask_open_door_node.cpp) - -## Rename C++ executable without prefix -## The above recommended prefix causes long target names, the following renames the -## target back to the shorter version for ease of user use -## e.g. "rosrun someones_pkg node" instead of "rosrun someones_pkg someones_pkg_node" -# set_target_properties(${PROJECT_NAME}_node PROPERTIES OUTPUT_NAME node PREFIX "") - -## Add cmake target dependencies of the executable -## same as for the library above -# add_dependencies(${PROJECT_NAME}_node ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS}) - -## Specify libraries to link a library or executable target against -# target_link_libraries(${PROJECT_NAME}_node -# ${catkin_LIBRARIES} -# ) - -############# -## Install ## -############# - -# all install targets should use catkin DESTINATION variables -# See http://ros.org/doc/api/catkin/html/adv_user_guide/variables.html - -## Mark executable scripts (Python etc.) 
for installation -## in contrast to setup.py, you can choose the destination -# catkin_install_python(PROGRAMS -# scripts/my_python_script -# DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} -# ) - -## Mark executables for installation -## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_executables.html -# install(TARGETS ${PROJECT_NAME}_node -# RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} -# ) - -## Mark libraries for installation -## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_libraries.html -# install(TARGETS ${PROJECT_NAME} -# ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} -# LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} -# RUNTIME DESTINATION ${CATKIN_GLOBAL_BIN_DESTINATION} -# ) - -## Mark cpp header files for installation -# install(DIRECTORY include/${PROJECT_NAME}/ -# DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION} -# FILES_MATCHING PATTERN "*.h" -# PATTERN ".svn" EXCLUDE -# ) - -## Mark other files for installation (e.g. launch and bag files, etc.) -# install(FILES -# # myfile1 -# # myfile2 -# DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION} -# ) - -############# -## Testing ## -############# - -## Add gtest based cpp test target and link libraries -# catkin_add_gtest(${PROJECT_NAME}-test test/test_find_person_and_ask_open_door.cpp) -# if(TARGET ${PROJECT_NAME}-test) -# target_link_libraries(${PROJECT_NAME}-test ${PROJECT_NAME}) -# endif() - -## Add folders to be run by python nosetests -# catkin_add_nosetests(test) diff --git a/legacy/find_person_and_ask_open_door/README.md b/legacy/find_person_and_ask_open_door/README.md deleted file mode 100644 index 1a73ef83d..000000000 --- a/legacy/find_person_and_ask_open_door/README.md +++ /dev/null @@ -1,49 +0,0 @@ -# find_person_and_ask_open_door - -Goes to a specified door and searches for a person. When person is found, ask person - to open the door. - -This package is maintained by: -- [elisabeth](mailto:elisabeth@todo.todo) - -## Prerequisites - -This package depends on the following ROS packages: -- catkin (buildtool) -- rospy (build) -- rospy (exec) - -Ask the package maintainer to write or create a blank `doc/PREREQUISITES.md` for their package! - -## Usage - -Ask the package maintainer to write a `doc/USAGE.md` for their package! - -## Example - -Ask the package maintainer to write a `doc/EXAMPLE.md` for their package! - -## Technical Overview - -Ask the package maintainer to write a `doc/TECHNICAL.md` for their package! - -## ROS Definitions - -### Launch Files - -#### `find_person_and_ask` - -No description provided. - - -### Messages - -This package has no messages. - -### Services - -This package has no services. - -### Actions - -This package has no actions. 
diff --git a/legacy/find_person_and_ask_open_door/config/map_points.yaml b/legacy/find_person_and_ask_open_door/config/map_points.yaml deleted file mode 100644 index c0f2744be..000000000 --- a/legacy/find_person_and_ask_open_door/config/map_points.yaml +++ /dev/null @@ -1,22 +0,0 @@ -door_simu: - position: - x: 6.133732680056999 - y: -0.7289013393838328 - z: 0.0 - orientation: - x: 0.0 - y: 0.0 - z: 0.028572161371893147 - w: 0.9995917324560756 - -door: - position: - x: -0.8110280734845722 - y: -1.7996335500178748 - z: 0.0 - orientation: - x: 0.0 - y: 0.0 - z: -0.9671729273733352 - w: 0.2541191227674403 - diff --git a/legacy/find_person_and_ask_open_door/launch/find_person_and_ask.launch b/legacy/find_person_and_ask_open_door/launch/find_person_and_ask.launch deleted file mode 100644 index 0ace32add..000000000 --- a/legacy/find_person_and_ask_open_door/launch/find_person_and_ask.launch +++ /dev/null @@ -1,6 +0,0 @@ - - - - - - \ No newline at end of file diff --git a/legacy/find_person_and_ask_open_door/package.xml b/legacy/find_person_and_ask_open_door/package.xml deleted file mode 100644 index f9528aaa7..000000000 --- a/legacy/find_person_and_ask_open_door/package.xml +++ /dev/null @@ -1,63 +0,0 @@ - - - find_person_and_ask_open_door - 0.0.0 - Goes to a specified door and searches for a person. When person is found, ask person - to open the door. - - - - - elisabeth - - - - - - TODO - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - catkin - rospy - rospy - rospy - - - - - - - - \ No newline at end of file diff --git a/legacy/find_person_and_ask_open_door/setup.py b/legacy/find_person_and_ask_open_door/setup.py deleted file mode 100644 index 71fec1ae6..000000000 --- a/legacy/find_person_and_ask_open_door/setup.py +++ /dev/null @@ -1,11 +0,0 @@ -#!/usr/bin/env python3 - -from distutils.core import setup -from catkin_pkg.python_setup import generate_distutils_setup - -setup_args = generate_distutils_setup( - packages=['find_person_and_ask_open_door'], - package_dir={'': 'src'} -) - -setup(**setup_args) \ No newline at end of file diff --git a/legacy/find_person_and_ask_open_door/src/find_person_and_ask_open_door/find_person_and_ask.py b/legacy/find_person_and_ask_open_door/src/find_person_and_ask_open_door/find_person_and_ask.py deleted file mode 100755 index 7b1ab114f..000000000 --- a/legacy/find_person_and_ask_open_door/src/find_person_and_ask_open_door/find_person_and_ask.py +++ /dev/null @@ -1,76 +0,0 @@ -#!/usr/bin/env python3 -import rospy -from tiago_controllers.controllers.base_controller import CmdVelController -# from tiago_controllers.head_controller import HeadController -from tiago_controllers.controllers.look_at import LookAt -# from lasr_voice.voice import Voice -from tiago_controllers.helpers import get_pose_from_param -from lasr_object_detection_yolo.detect_objects import detect_objects -from tiago_controllers.controllers import Controllers - -# TODO: make statemachine -# TODO: add simulation:= true/false in launch file - -HORIZONTAL = 0.8 -VERTICAL = 0.3 - - -class FindPersonAndAsk: - def __init__(self): - self.controllers = Controllers() - self.base_controller = self.controllers.base_controller - self.head_controller = self.controllers.head_controller - self.cmd_vel_controller = CmdVelController() - - self.head_controller.sync_reach_to(0, 0) # start by centering the head - # self.voice = Voice() - self.search_points = [(-1 * HORIZONTAL, VERTICAL), - (0, VERTICAL), - (HORIZONTAL, VERTICAL), - (HORIZONTAL, 0), - (0, 0), - (-1 * HORIZONTAL, 0), - (-1 * 
HORIZONTAL, -1 * VERTICAL), - (0, -1 * VERTICAL), - (HORIZONTAL, -1 * VERTICAL), - (0, 0)] - - def search_for_person(self): - for point in self.search_points: - self.head_controller.sync_reach_to(point[0], point[1]) - people = detect_objects(["person"], "coco") - if people: - return people[0], point[0] - return None - - # ! State machine 1: go to place - # ! State machine 2: search - # ! State machine 3: turn - # ! State machine 4: talk - def main(self): - # door_pos = get_pose_from_param('/door_simu') - # door_pos = get_pose_from_param('/door') - # self.base_controller.sync_to_pose(door_pos) - cmd_vel = CmdVelController() - turns = 3 - for i in range(turns): - try: - closest_person, head_rot = self.search_for_person() - if closest_person: - print("FOUND PERSON") - look_at = LookAt(self.head_controller, self.base_controller, self.cmd_vel_controller, "person") - look_at.look_at(closest_person.xywh, head_rot) - print("CAN YOU PLEASE OPEN THE DOOR") - self.voice.sync_tts("Can you please open the door for me?") - return - except TypeError: - cmd_vel.rotate(60, 360/turns, True) - - print("I CANT SEE ANYONE") - self.voice.sync_tts("I can't see anyone!") - - -if __name__ == '__main__': - rospy.init_node("find_person_and_ask_open_door_node", anonymous=True) - find_and_ask = FindPersonAndAsk() - find_and_ask.main() diff --git a/legacy/lasr_object_detection_yolo/src/__init__.py b/legacy/graph_room_navigation/CATKIN_IGNORE similarity index 100% rename from legacy/lasr_object_detection_yolo/src/__init__.py rename to legacy/graph_room_navigation/CATKIN_IGNORE diff --git a/legacy/graph_room_navigation/package.xml b/legacy/graph_room_navigation/package.xml index 9fb8f1a0b..0e58bf540 100644 --- a/legacy/graph_room_navigation/package.xml +++ b/legacy/graph_room_navigation/package.xml @@ -7,7 +7,7 @@ - jared + Jared Swift diff --git a/legacy/interaction_module/CMakeLists.txt b/legacy/interaction_module/CMakeLists.txt deleted file mode 100644 index 116462ac9..000000000 --- a/legacy/interaction_module/CMakeLists.txt +++ /dev/null @@ -1,207 +0,0 @@ -cmake_minimum_required(VERSION 3.0.2) -project(interaction_module) - -## Compile as C++11, supported in ROS Kinetic and newer -# add_compile_options(-std=c++11) - -## Find catkin macros and libraries -## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz) -## is used, also find other catkin packages -find_package(catkin REQUIRED COMPONENTS - geometry_msgs - roscpp - rospy - std_msgs - message_generation -) - -## System dependencies are found with CMake's conventions -# find_package(Boost REQUIRED COMPONENTS system) - - -## Uncomment this if the package has a setup.py. This macro ensures -## modules and global scripts declared therein get installed -## See http://ros.org/doc/api/catkin/html/user_guide/setup_dot_py.html -catkin_python_setup() - -################################################ -## Declare ROS messages, services and actions ## -################################################ - -## To declare and build messages, services or actions from within this -## package, follow these steps: -## * Let MSG_DEP_SET be the set of packages whose message types you use in -## your messages/services/actions (e.g. std_msgs, actionlib_msgs, ...). 
-## * In the file package.xml: -## * add a build_depend tag for "message_generation" -## * add a build_depend and a exec_depend tag for each package in MSG_DEP_SET -## * If MSG_DEP_SET isn't empty the following dependency has been pulled in -## but can be declared for certainty nonetheless: -## * add a exec_depend tag for "message_runtime" -## * In this file (CMakeLists.txt): -## * add "message_generation" and every package in MSG_DEP_SET to -## find_package(catkin REQUIRED COMPONENTS ...) -## * add "message_runtime" and every package in MSG_DEP_SET to -## catkin_package(CATKIN_DEPENDS ...) -## * uncomment the add_*_files sections below as needed -## and list every .msg/.srv/.action file to be processed -## * uncomment the generate_messages entry below -## * add every package in MSG_DEP_SET to generate_messages(DEPENDENCIES ...) - -## Generate messages in the 'msg' folder -# add_message_files( -# FILES -# Message1.msg -# Message2.msg -# ) - -## Generate services in the 'srv' folder - add_service_files( - FILES - AudioAndTextInteraction.srv -) - -## Generate actions in the 'action' folder -# add_action_files( -# FILES -# Action1.action -# Action2.action -# ) - -## Generate added messages and services with any dependencies listed here - generate_messages( - DEPENDENCIES - std_msgs - ) - -################################################ -## Declare ROS dynamic reconfigure parameters ## -################################################ - -## To declare and build dynamic reconfigure parameters within this -## package, follow these steps: -## * In the file package.xml: -## * add a build_depend and a exec_depend tag for "dynamic_reconfigure" -## * In this file (CMakeLists.txt): -## * add "dynamic_reconfigure" to -## find_package(catkin REQUIRED COMPONENTS ...) 
-## * uncomment the "generate_dynamic_reconfigure_options" section below -## and list every .cfg file to be processed - -## Generate dynamic reconfigure parameters in the 'cfg' folder -# generate_dynamic_reconfigure_options( -# cfg/DynReconf1.cfg -# cfg/DynReconf2.cfg -# ) - -################################### -## catkin specific configuration ## -################################### -## The catkin_package macro generates cmake config files for your package -## Declare things to be passed to dependent projects -## INCLUDE_DIRS: uncomment this if your package contains header files -## LIBRARIES: libraries you create in this project that dependent projects also need -## CATKIN_DEPENDS: catkin_packages dependent projects also need -## DEPENDS: system dependencies of this project that dependent projects also need -catkin_package( -# INCLUDE_DIRS include -# LIBRARIES interaction_module -# CATKIN_DEPENDS geometry_msgs roscpp rospy std_msgs -# DEPENDS system_lib -) - -########### -## Build ## -########### - -## Specify additional locations of header files -## Your package locations should be listed before other locations -include_directories( -# include - ${catkin_INCLUDE_DIRS} -) - -## Declare a C++ library -# add_library(${PROJECT_NAME} -# src/${PROJECT_NAME}/interaction_module.cpp -# ) - -## Add cmake target dependencies of the library -## as an example, code may need to be generated before libraries -## either from message generation or dynamic reconfigure -# add_dependencies(${PROJECT_NAME} ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS}) - -## Declare a C++ executable -## With catkin_make all packages are built within a single CMake context -## The recommended prefix ensures that target names across packages don't collide -# add_executable(${PROJECT_NAME}_node src/interaction_module_node.cpp) - -## Rename C++ executable without prefix -## The above recommended prefix causes long target names, the following renames the -## target back to the shorter version for ease of user use -## e.g. "rosrun someones_pkg node" instead of "rosrun someones_pkg someones_pkg_node" -# set_target_properties(${PROJECT_NAME}_node PROPERTIES OUTPUT_NAME node PREFIX "") - -## Add cmake target dependencies of the executable -## same as for the library above -# add_dependencies(${PROJECT_NAME}_node ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS}) - -## Specify libraries to link a library or executable target against -# target_link_libraries(${PROJECT_NAME}_node -# ${catkin_LIBRARIES} -# ) - -############# -## Install ## -############# - -# all install targets should use catkin DESTINATION variables -# See http://ros.org/doc/api/catkin/html/adv_user_guide/variables.html - -## Mark executable scripts (Python etc.) 
for installation -## in contrast to setup.py, you can choose the destination -# catkin_install_python(PROGRAMS -# scripts/my_python_script -# DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} -# ) - -## Mark executables for installation -## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_executables.html -# install(TARGETS ${PROJECT_NAME}_node -# RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} -# ) - -## Mark libraries for installation -## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_libraries.html -# install(TARGETS ${PROJECT_NAME} -# ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} -# LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} -# RUNTIME DESTINATION ${CATKIN_GLOBAL_BIN_DESTINATION} -# ) - -## Mark cpp header files for installation -# install(DIRECTORY include/${PROJECT_NAME}/ -# DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION} -# FILES_MATCHING PATTERN "*.h" -# PATTERN ".svn" EXCLUDE -# ) - -## Mark other files for installation (e.g. launch and bag files, etc.) -# install(FILES -# # myfile1 -# # myfile2 -# DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION} -# ) - -############# -## Testing ## -############# - -## Add gtest based cpp test target and link libraries -# catkin_add_gtest(${PROJECT_NAME}-test test/test_interaction_module.cpp) -# if(TARGET ${PROJECT_NAME}-test) -# target_link_libraries(${PROJECT_NAME}-test ${PROJECT_NAME}) -# endif() - -## Add folders to be run by python nosetests -# catkin_add_nosetests(test) diff --git a/legacy/interaction_module/README.md b/legacy/interaction_module/README.md deleted file mode 100644 index 36aa36cbc..000000000 --- a/legacy/interaction_module/README.md +++ /dev/null @@ -1,70 +0,0 @@ -# interaction_module - -The interaction_module package - -This package is maintained by: -- [nicole](mailto:nicole@todo.todo) - -## Prerequisites - -This package depends on the following ROS packages: -- catkin (buildtool) -- geometry_msgs (build) -- roscpp (build) -- rospy (build) -- std_msgs (build) -- geometry_msgs (exec) -- roscpp (exec) -- rospy (exec) -- std_msgs (exec) - -Ask the package maintainer to write or create a blank `doc/PREREQUISITES.md` for their package! - -## Usage - -Ask the package maintainer to write a `doc/USAGE.md` for their package! - -## Example - -Ask the package maintainer to write a `doc/EXAMPLE.md` for their package! - -## Technical Overview - -Ask the package maintainer to write a `doc/TECHNICAL.md` for their package! - -## ROS Definitions - -### Launch Files - -#### `interaction` - -No description provided. - - -### Messages - -This package has no messages. - -### Services - -#### `AudioAndTextInteraction` - -Request - -| Field | Type | Description | -|:-:|:-:|---| -| action | string | | -| subaction | string | | -| query_text | string | | - -Response - -| Field | Type | Description | -|:-:|:-:|---| -| status | bool | | -| result | string | | - - -### Actions - -This package has no actions. 
diff --git a/legacy/interaction_module/launch/interaction.launch b/legacy/interaction_module/launch/interaction.launch deleted file mode 100644 index cb166d55e..000000000 --- a/legacy/interaction_module/launch/interaction.launch +++ /dev/null @@ -1,4 +0,0 @@ - - - - \ No newline at end of file diff --git a/legacy/interaction_module/package.xml b/legacy/interaction_module/package.xml deleted file mode 100644 index 60c8ddc32..000000000 --- a/legacy/interaction_module/package.xml +++ /dev/null @@ -1,71 +0,0 @@ - - - interaction_module - 0.0.0 - The interaction_module package - - - - - nicole - - - - - - TODO - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - catkin - geometry_msgs - roscpp - rospy - std_msgs - geometry_msgs - roscpp - rospy - std_msgs - geometry_msgs - roscpp - rospy - std_msgs - - - - - - - - diff --git a/legacy/interaction_module/setup.py b/legacy/interaction_module/setup.py deleted file mode 100644 index 3463b4cd6..000000000 --- a/legacy/interaction_module/setup.py +++ /dev/null @@ -1,12 +0,0 @@ -#!/usr/bin/env python3 - -from distutils.core import setup -from catkin_pkg.python_setup import generate_distutils_setup - -# fetch values from package.xml -setup_args = generate_distutils_setup( - packages=['interaction_module'], - package_dir={'': 'src'} -) - -setup(**setup_args) \ No newline at end of file diff --git a/legacy/interaction_module/src/interaction_module/interaction_module_srv.py b/legacy/interaction_module/src/interaction_module/interaction_module_srv.py deleted file mode 100755 index 3c0e26c52..000000000 --- a/legacy/interaction_module/src/interaction_module/interaction_module_srv.py +++ /dev/null @@ -1,29 +0,0 @@ -#!/usr/bin/env python3 -import rospy -from interaction_module.srv import AudioAndTextInteraction, AudioAndTextInteractionResponse -from listen_module.srv import DialogListen, DialogListenRequest, DialogListenResponse - - -class InteractionModule: - def __init__(self): - self.dialog_srv = rospy.ServiceProxy("/dialogflow_server", DialogListen) - - self.interaction_module_srv = rospy.Service("/interaction_module", AudioAndTextInteraction, self.interaction_module_cb) - - def interaction_module_cb(self, request): - rospy.loginfo("Received audio request") - resp = AudioAndTextInteractionResponse() - print(request.action, request.subaction, request.query_text) - result = self.dialog_srv(request.action, request.subaction, request.query_text) - if result: - resp.status = result.status - resp.result = result.result - # possibly call to talk here - return resp - - -if __name__ == '__main__': - rospy.init_node("interaction_module") - rospy.loginfo("Interaction Module is up and running") - interaction_module = InteractionModule() - rospy.spin() \ No newline at end of file diff --git a/legacy/interaction_module/src/interaction_module/test_interaction.py b/legacy/interaction_module/src/interaction_module/test_interaction.py deleted file mode 100755 index b9ac83198..000000000 --- a/legacy/interaction_module/src/interaction_module/test_interaction.py +++ /dev/null @@ -1,10 +0,0 @@ -#!/usr/bin/env python3 -import rospy - -rospy.init_node("test") - -from interaction_module.srv import AudioAndTextInteraction - -speech = rospy.ServiceProxy("/interaction_module", AudioAndTextInteraction) -# print(speech("ROOM_REQUEST", "ask_location", "Can you show me where the 609 office is")) -print(speech("ROOM_REQUEST", "ask_location", "SOUND:PLAYING:PLEASE")) diff --git a/legacy/interaction_module/srv/AudioAndTextInteraction.srv 
b/legacy/interaction_module/srv/AudioAndTextInteraction.srv deleted file mode 100644 index 8fb8ea81c..000000000 --- a/legacy/interaction_module/srv/AudioAndTextInteraction.srv +++ /dev/null @@ -1,7 +0,0 @@ -string action -string subaction -string query_text ---- -bool status -string result - diff --git a/legacy/lasr_dialogflow/README.md b/legacy/lasr_dialogflow/README.md deleted file mode 100644 index faf2964d5..000000000 --- a/legacy/lasr_dialogflow/README.md +++ /dev/null @@ -1,75 +0,0 @@ -# lasr_dialogflow - -The lasr_dialogflow package - -This package is maintained by: -- [jared](mailto:jared@todo.todo) - -## Prerequisites - -This package depends on the following ROS packages: -- catkin (buildtool) - -Ask the package maintainer to write or create a blank `doc/PREREQUISITES.md` for their package! - -## Usage - -Ask the package maintainer to write a `doc/USAGE.md` for their package! - -## Example - -Ask the package maintainer to write a `doc/EXAMPLE.md` for their package! - -## Technical Overview - -Ask the package maintainer to write a `doc/TECHNICAL.md` for their package! - -## ROS Definitions - -### Launch Files - -This package has no launch files. - -### Messages - -This package has no messages. - -### Services - -#### `DialogflowAudio` - -Request - -| Field | Type | Description | -|:-:|:-:|---| -| task | std_msgs/String | | -| action | std_msgs/String | | - -Response - -| Field | Type | Description | -|:-:|:-:|---| -| result | std_msgs/String | | -| success | std_msgs/Bool | | - -#### `DialogflowText` - -Request - -| Field | Type | Description | -|:-:|:-:|---| -| task | std_msgs/String | | -| action | std_msgs/String | | -| query_text | std_msgs/String | | - -Response - -| Field | Type | Description | -|:-:|:-:|---| -| result | std_msgs/String | | -| success | std_msgs/Bool | | - - -### Actions - -This package has no actions. 
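Both services above wrap every field in `std_msgs` types, so callers pass `String()` objects and read `.data` rather than raw Python values. A minimal sketch of a text query, assuming the `dialogflow_server` node from this package is running with valid Dialogflow credentials (it advertises `dialogflow_server/process_text`):

```python
#!/usr/bin/env python3
# Minimal client sketch for the DialogflowText service described above.
# Assumes the dialogflow_server node is running with valid Dialogflow credentials.
import rospy
from std_msgs.msg import String
from lasr_dialogflow.srv import DialogflowText

rospy.init_node("dialogflow_text_client")
rospy.wait_for_service("dialogflow_server/process_text")
process_text = rospy.ServiceProxy("dialogflow_server/process_text", DialogflowText)

# Every field is a std_msgs type, so plain strings are wrapped in String().
resp = process_text(String("robocup_receptionist"), String("ask_name"), String("my name is Jared"))
rospy.loginfo("success=%s result=%s", resp.success.data, resp.result.data)
```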
diff --git a/legacy/lasr_dialogflow/src/lasr_dialogflow/actions/__init__.py b/legacy/lasr_dialogflow/src/lasr_dialogflow/actions/__init__.py deleted file mode 100644 index aaec110cf..000000000 --- a/legacy/lasr_dialogflow/src/lasr_dialogflow/actions/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -from lasr_dialogflow.actions.base import BaseAction -from lasr_dialogflow.actions.receptionist import ReceptionistAction \ No newline at end of file diff --git a/legacy/lasr_dialogflow/src/lasr_dialogflow/actions/base.py b/legacy/lasr_dialogflow/src/lasr_dialogflow/actions/base.py deleted file mode 100644 index 700b709a0..000000000 --- a/legacy/lasr_dialogflow/src/lasr_dialogflow/actions/base.py +++ /dev/null @@ -1,61 +0,0 @@ -#!/usr/bin/env python3 - -from lasr_dialogflow.dialogflow_client_stream import DialogflowClientStream -import uuid -import dialogflow - -class BaseAction(): - - def __init__(self, project_id, df_lang_id="en"): - - self.project_id = project_id - self.df_lang_id = df_lang_id - self.attempts = 0 - self.max_attempts = 3 - self.streaming_client = None - - self.session_id = uuid.uuid4() - - self.streaming_client = DialogflowClientStream( - self.project_id, - self.session_id, - language_code=self.df_lang_id - ) - - def stop(): - self.streaming_client.stop() - - def listen_in_context(self, context=None): - - query_params = None - if context: - query_params = dialogflow.types.session_pb2.QueryParameters(contexts=[self.get_context(context)]) - - response = None - for response in self.streaming_client.response_generator(query_params): - if response.recognition_result.message_type == dialogflow.enums.StreamingRecognitionResult.MessageType.END_OF_SINGLE_UTTERANCE: - rospy.loginfo("received end of utterance") - self.stop() - - if response and not response.query_result.query_text: - rospy.loginfo("no audio request") - self.attempts +=1 - if self.attempts >= self.max_attempts: - return None - else: - # speak here - return self.listen_in_context(context) - else: - self.attempts = 0 - return reponse - - def text_in_context(self, text, context=None): - query_params = None - if context: - query_params = dialogflow.types.session_pb2.QueryParameters(contexts=[self.get_context(context)]) - - return self.streaming_client.text_request(text, query_params=query_params) - - def get_context(self, context, lifespan=1): - return dialogflow.types.context_pb2.Context(name=dialogflow.ContextsClient.context_path(self.project_id, self.session_id, context), - lifespan_count=lifespan) \ No newline at end of file diff --git a/legacy/lasr_dialogflow/src/lasr_dialogflow/actions/receptionist.py b/legacy/lasr_dialogflow/src/lasr_dialogflow/actions/receptionist.py deleted file mode 100644 index 71dcf80ec..000000000 --- a/legacy/lasr_dialogflow/src/lasr_dialogflow/actions/receptionist.py +++ /dev/null @@ -1,78 +0,0 @@ -#!/usr/bin/env python3 -import random -import rospy -from lasr_dialogflow.actions import BaseAction -from std_msgs.msg import String -from google.api_core.exceptions import DeadlineExceeded - -class ReceptionistAction(BaseAction): - def __init__(self, project_id, df_lang_id="en"): - super(ReceptionistAction, self).__init__(project_id, df_lang_id="en") - - self.name = None - self.name_confirmed = None - self.current_context = None - self.favourite_drink = None - self.max_retries = 3 - - self.actions = { - "ask_name" : self.ask_name, - "ask_favourite_drink" : self.ask_favourite_drink, - "stop" : self.stop, - } - - def ask_name(self, use_mic=False, text=None): - - if not use_mic and text: - 
print(self.handle_response(self.text_in_context(text, context="GetName"), "GetName")) - return self.handle_response(self.text_in_context(text, context="GetName"), "GetName") - elif use_mic: - self.say_text("What's your name?") - attempt = 0 - while attempt < self.max_retries: - name = self.handle_response(self.listen_in_context("GetName"), "GetName") - if name: - self.say_text(f"Your name is {name}, is that correct?") - correct = self.handle_response(self.listen_in_context("ConfirmCheck"), "ConfirmCheck") - if correct == "yes": - return name - else: - self.say_text("Okay. Can you repeat your name please?") - else: - self.say_text("Sorry, I didn't get that. Can you repeat please?") - attempt +=1 - - return "" - - def ask_favourite_drink(self): - self.say_text("What's your favourite drink?") - - attempt = 0 - while attempt < self.max_retries: - drink = self.handle_response(self.listen_in_context("GetFavouriteDrink"), "GetFavouriteDrink") - if drink: - self.say_text(f"Your favourite drink is {drink}, is that correct?") - correct = self.handle_response(self.listen_in_context("ConfirmCheck"), "ConfirmCheck") - if correct == "yes": - return drink - else: - self.say_text("Okay. Can you repeat your favourite drink please?") - else: - self.say_text("Sorry, I didn't get that. Can you repeat please?") - attempt +=1 - - return "" - - def handle_response(self, response, context): - if not response: - return None - try: - if context == "GetName": - return response.query_result.parameters["given-name"] - elif context == "GetFavouriteDrink": - print(response.query_result) - return response.query_result.parameters["drink-name"] - elif context == "ConfirmCheck": - return response.query_result.parameters["yesno"] - except ValueError: - return None \ No newline at end of file diff --git a/legacy/lasr_dialogflow/src/lasr_dialogflow/dialogflow_client_stream.py b/legacy/lasr_dialogflow/src/lasr_dialogflow/dialogflow_client_stream.py deleted file mode 100644 index 2f442183b..000000000 --- a/legacy/lasr_dialogflow/src/lasr_dialogflow/dialogflow_client_stream.py +++ /dev/null @@ -1,59 +0,0 @@ -#!/usr/bin/env python3 - -import os -import sounddevice -import dialogflow -import rospkg - -CHUNK_SUZE = 4096 - -class DialogflowClientStream(): - - def __init__(self, project_id, session_id, language_code="en", audio_encoding=dialogflow.enums.AudioEncoding.AUDIO_ENCODING_LINEAR_16, sample_rate=None, input_device=None): - - self.input_device = input_device - self.sample_rate = sample_rate - - credentials_path = os.path.join(rospkg.RosPack().get_path('lasr_dialogflow'), 'config', '{}.json'.format(project_id)) - - self.session_client = dialogflow.SessionsClient.from_service_account_file(credentials_path) - self.session_path = self.session_client.session_path(project_id, session_id) - self.audio_config = dialogflow.types.session_pb2.InputAudioConfig( - language_code=language_code, - audio_encoding=audio_encoding, - sample_rate_hertz=sample_rate, - ) - - def response_generator(self, query_params=None): - return self.session_client.streaming_detect_intent(self.request_generator(query_params)) - - def requests_generator(self, query_params=None): - self.stop_requested = False - - microphone_stream = sounddevice.InputStream(samplerate=self.sample_rate, device=self.input_device) - - query_input = dialogflow.types.session_pb2.QueryInput(audio_config=self.audio_config) - - # First request is for config - yield dialogflow.types.session_pb2.StreamingDetectIntentRequest( - session=self.session_path, - query_input=query_input, - 
query_params=query_params, - single_utterance=True, - ) - - while not self.stop_requested and microphone_stream.active(): - data = microphone_stream.read(CHUNK_SIZE) - yield dialogflow.types.session_pb2.StreamingDetectIntentRequest(input_audio=data) - - microphone_stream.stop() - - def text_request(self, text, query_params=None): - text_input = dialogflow.types.session_pb2.TextInput(text=text, language_code="en-GB") - query_input = dialogflow.types.session_pb2.QueryInput(text=text_input) - response = self.session_client.detect_intent(session=self.session_path, query_input=query_input, query_params=query_params) - - return response - - def stop(self): - self.stop_requested = True diff --git a/legacy/lasr_dialogflow/src/lasr_dialogflow/dialogflow_server.py b/legacy/lasr_dialogflow/src/lasr_dialogflow/dialogflow_server.py deleted file mode 100755 index 8653b8a7b..000000000 --- a/legacy/lasr_dialogflow/src/lasr_dialogflow/dialogflow_server.py +++ /dev/null @@ -1,54 +0,0 @@ -#!/usr/bin/env python3 -import rospy -from lasr_dialogflow.srv import DialogflowAudio, DialogflowAudioResponse, DialogflowText, DialogflowTextResponse -from lasr_dialogflow.actions import ReceptionistAction -import json -import os -from std_msgs.msg import String - -class DialogflowServer(): - - def __init__(self): - - self.config = { - "robocup_receptionist" : { - "project_id" : "robocup-receptionist-evnn", - "cls" : ReceptionistAction - } - } - - self.audio_srv = rospy.Service("dialogflow_server/process_audio", DialogflowAudio, self.process_audio) - self.text_srv = rospy.Service("dialogflow_server/process_text", DialogflowText, self.process_text) - - def process_audio(self, req): - - response = DialogflowAudioResponse() - - project_id = self.config[req.task.data]["project_id"] - task = self.config[req.task.data]["cls"](project_id) - result = task.actions[req.action.data]() - - if result: - response.result = String(result) - response.success = True - - return response - - def process_text(self, req): - - response = DialogflowTextResponse() - - project_id = self.config[req.task.data]["project_id"] - task = self.config[req.task.data]["cls"](project_id) - result = task.actions[req.action.data](use_mic=False, text=req.query_text.data) - - if result: - response.result = String(result) - response.success = True - - return response - -if __name__ == "__main__": - rospy.init_node("dialogflow_server") - dialogflow_server = DialogflowServer() - rospy.spin() \ No newline at end of file diff --git a/legacy/lasr_dialogflow/srv/DialogflowAudio.srv b/legacy/lasr_dialogflow/srv/DialogflowAudio.srv deleted file mode 100644 index bdca858cf..000000000 --- a/legacy/lasr_dialogflow/srv/DialogflowAudio.srv +++ /dev/null @@ -1,5 +0,0 @@ -std_msgs/String task -std_msgs/String action ---- -std_msgs/String result -std_msgs/Bool success \ No newline at end of file diff --git a/legacy/lasr_dialogflow/srv/DialogflowText.srv b/legacy/lasr_dialogflow/srv/DialogflowText.srv deleted file mode 100644 index 2b9497c61..000000000 --- a/legacy/lasr_dialogflow/srv/DialogflowText.srv +++ /dev/null @@ -1,6 +0,0 @@ -std_msgs/String task -std_msgs/String action -std_msgs/String query_text ---- -std_msgs/String result -std_msgs/Bool success \ No newline at end of file diff --git a/legacy/lasr_interaction_server/CMakeLists.txt b/legacy/lasr_interaction_server/CMakeLists.txt deleted file mode 100644 index a26dbfad7..000000000 --- a/legacy/lasr_interaction_server/CMakeLists.txt +++ /dev/null @@ -1,202 +0,0 @@ -cmake_minimum_required(VERSION 3.0.2) 
-project(lasr_interaction_server) - -## Compile as C++11, supported in ROS Kinetic and newer -# add_compile_options(-std=c++11) - -## Find catkin macros and libraries -## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz) -## is used, also find other catkin packages -find_package(catkin REQUIRED COMPONENTS message_generation message_runtime rospy std_msgs) - -## System dependencies are found with CMake's conventions -# find_package(Boost REQUIRED COMPONENTS system) - - -## Uncomment this if the package has a setup.py. This macro ensures -## modules and global scripts declared therein get installed -## See http://ros.org/doc/api/catkin/html/user_guide/setup_dot_py.html -# catkin_python_setup() - -################################################ -## Declare ROS messages, services and actions ## -################################################ - -## To declare and build messages, services or actions from within this -## package, follow these steps: -## * Let MSG_DEP_SET be the set of packages whose message types you use in -## your messages/services/actions (e.g. std_msgs, actionlib_msgs, ...). -## * In the file package.xml: -## * add a build_depend tag for "message_generation" -## * add a build_depend and a exec_depend tag for each package in MSG_DEP_SET -## * If MSG_DEP_SET isn't empty the following dependency has been pulled in -## but can be declared for certainty nonetheless: -## * add a exec_depend tag for "message_runtime" -## * In this file (CMakeLists.txt): -## * add "message_generation" and every package in MSG_DEP_SET to -## find_package(catkin REQUIRED COMPONENTS ...) -## * add "message_runtime" and every package in MSG_DEP_SET to -## catkin_package(CATKIN_DEPENDS ...) -## * uncomment the add_*_files sections below as needed -## and list every .msg/.srv/.action file to be processed -## * uncomment the generate_messages entry below -## * add every package in MSG_DEP_SET to generate_messages(DEPENDENCIES ...) - -## Generate messages in the 'msg' folder -# add_message_files( -# FILES -# Message1.msg -# Message2.msg -# ) - -## Generate services in the 'srv' folder -add_service_files( - FILES - SpeechInteraction.srv - TextInteraction.srv -) - -## Generate actions in the 'action' folder -# add_action_files( -# FILES -# Action1.action -# Action2.action -# ) - -## Generate added messages and services with any dependencies listed here -generate_messages( - DEPENDENCIES - std_msgs # Or other packages containing msgs -) - -################################################ -## Declare ROS dynamic reconfigure parameters ## -################################################ - -## To declare and build dynamic reconfigure parameters within this -## package, follow these steps: -## * In the file package.xml: -## * add a build_depend and a exec_depend tag for "dynamic_reconfigure" -## * In this file (CMakeLists.txt): -## * add "dynamic_reconfigure" to -## find_package(catkin REQUIRED COMPONENTS ...) 
-## * uncomment the "generate_dynamic_reconfigure_options" section below -## and list every .cfg file to be processed - -## Generate dynamic reconfigure parameters in the 'cfg' folder -# generate_dynamic_reconfigure_options( -# cfg/DynReconf1.cfg -# cfg/DynReconf2.cfg -# ) - -################################### -## catkin specific configuration ## -################################### -## The catkin_package macro generates cmake config files for your package -## Declare things to be passed to dependent projects -## INCLUDE_DIRS: uncomment this if your package contains header files -## LIBRARIES: libraries you create in this project that dependent projects also need -## CATKIN_DEPENDS: catkin_packages dependent projects also need -## DEPENDS: system dependencies of this project that dependent projects also need -catkin_package( -# INCLUDE_DIRS include -# LIBRARIES lasr_interaction_server -# CATKIN_DEPENDS other_catkin_pkg -# DEPENDS system_lib -) - -########### -## Build ## -########### - -## Specify additional locations of header files -## Your package locations should be listed before other locations -include_directories( -# include -# ${catkin_INCLUDE_DIRS} -) - -## Declare a C++ library -# add_library(${PROJECT_NAME} -# src/${PROJECT_NAME}/lasr_interaction_server.cpp -# ) - -## Add cmake target dependencies of the library -## as an example, code may need to be generated before libraries -## either from message generation or dynamic reconfigure -# add_dependencies(${PROJECT_NAME} ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS}) - -## Declare a C++ executable -## With catkin_make all packages are built within a single CMake context -## The recommended prefix ensures that target names across packages don't collide -# add_executable(${PROJECT_NAME}_node src/lasr_interaction_server_node.cpp) - -## Rename C++ executable without prefix -## The above recommended prefix causes long target names, the following renames the -## target back to the shorter version for ease of user use -## e.g. "rosrun someones_pkg node" instead of "rosrun someones_pkg someones_pkg_node" -# set_target_properties(${PROJECT_NAME}_node PROPERTIES OUTPUT_NAME node PREFIX "") - -## Add cmake target dependencies of the executable -## same as for the library above -# add_dependencies(${PROJECT_NAME}_node ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS}) - -## Specify libraries to link a library or executable target against -# target_link_libraries(${PROJECT_NAME}_node -# ${catkin_LIBRARIES} -# ) - -############# -## Install ## -############# - -# all install targets should use catkin DESTINATION variables -# See http://ros.org/doc/api/catkin/html/adv_user_guide/variables.html - -## Mark executable scripts (Python etc.) 
for installation -## in contrast to setup.py, you can choose the destination -# catkin_install_python(PROGRAMS -# scripts/my_python_script -# DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} -# ) - -## Mark executables for installation -## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_executables.html -# install(TARGETS ${PROJECT_NAME}_node -# RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} -# ) - -## Mark libraries for installation -## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_libraries.html -# install(TARGETS ${PROJECT_NAME} -# ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} -# LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} -# RUNTIME DESTINATION ${CATKIN_GLOBAL_BIN_DESTINATION} -# ) - -## Mark cpp header files for installation -# install(DIRECTORY include/${PROJECT_NAME}/ -# DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION} -# FILES_MATCHING PATTERN "*.h" -# PATTERN ".svn" EXCLUDE -# ) - -## Mark other files for installation (e.g. launch and bag files, etc.) -# install(FILES -# # myfile1 -# # myfile2 -# DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION} -# ) - -############# -## Testing ## -############# - -## Add gtest based cpp test target and link libraries -# catkin_add_gtest(${PROJECT_NAME}-test test/test_lasr_interaction_server.cpp) -# if(TARGET ${PROJECT_NAME}-test) -# target_link_libraries(${PROJECT_NAME}-test ${PROJECT_NAME}) -# endif() - -## Add folders to be run by python nosetests -# catkin_add_nosetests(test) diff --git a/legacy/lasr_interaction_server/README.md b/legacy/lasr_interaction_server/README.md deleted file mode 100644 index e01c74a48..000000000 --- a/legacy/lasr_interaction_server/README.md +++ /dev/null @@ -1,78 +0,0 @@ -# lasr_interaction_server - -The lasr_interaction_server package - -This package is maintained by: -- [jared](mailto:jared@todo.todo) - -## Prerequisites - -This package depends on the following ROS packages: -- catkin (buildtool) - -Ask the package maintainer to write or create a blank `doc/PREREQUISITES.md` for their package! - -## Usage - -Ask the package maintainer to write a `doc/USAGE.md` for their package! - -## Example - -Ask the package maintainer to write a `doc/EXAMPLE.md` for their package! - -## Technical Overview - -Ask the package maintainer to write a `doc/TECHNICAL.md` for their package! - -## ROS Definitions - -### Launch Files - -#### `interaction_server` - -No description provided. - - -### Messages - -This package has no messages. - -### Services - -#### `TextInteraction` - -Request - -| Field | Type | Description | -|:-:|:-:|---| -| task | std_msgs/String | | -| action | std_msgs/String | | -| query_text | std_msgs/String | | - -Response - -| Field | Type | Description | -|:-:|:-:|---| -| result | std_msgs/String | | -| success | std_msgs/Bool | | - -#### `SpeechInteraction` - -Request - -| Field | Type | Description | -|:-:|:-:|---| -| task | std_msgs/String | | -| action | std_msgs/String | | - -Response - -| Field | Type | Description | -|:-:|:-:|---| -| result | std_msgs/String | | -| success | std_msgs/Bool | | - - -### Actions - -This package has no actions. 
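The interaction server forwards these requests to the `lasr_dialogflow` services, so a text query looks much like the Dialogflow one. A minimal sketch, assuming the server exposes `TextInteraction` under a name such as `/lasr_interaction_server/text_interaction` (the name is illustrative; it is not pinned down anywhere in this diff):

```python
#!/usr/bin/env python3
# Minimal client sketch for the TextInteraction service described above.
# The service name is illustrative only; it is not fixed in this diff.
import rospy
from std_msgs.msg import String
from lasr_interaction_server.srv import TextInteraction

rospy.init_node("text_interaction_client")
SERVICE = "/lasr_interaction_server/text_interaction"  # assumed name
rospy.wait_for_service(SERVICE)
text_interaction = rospy.ServiceProxy(SERVICE, TextInteraction)

resp = text_interaction(String("robocup_receptionist"), String("ask_name"), String("my name is Jared"))
rospy.loginfo("success=%s result=%s", resp.success.data, resp.result.data)
```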
diff --git a/legacy/lasr_interaction_server/launch/interaction_server.launch b/legacy/lasr_interaction_server/launch/interaction_server.launch deleted file mode 100644 index 9ab0dbf99..000000000 --- a/legacy/lasr_interaction_server/launch/interaction_server.launch +++ /dev/null @@ -1,7 +0,0 @@ - - - - - - - \ No newline at end of file diff --git a/legacy/lasr_interaction_server/src/interaction_server.py b/legacy/lasr_interaction_server/src/interaction_server.py deleted file mode 100755 index 9acafe7d9..000000000 --- a/legacy/lasr_interaction_server/src/interaction_server.py +++ /dev/null @@ -1,42 +0,0 @@ -#!/usr/bin/env python3 -import rospy - -from std_msgs.msg import String -from lasr_interaction_server.srv import SpeechInteraction, TextInteraction -from lasr_dialogflow.srv import DialogflowAudio, DialogflowText -from pal_interaction_msgs.msg import TtsActionGoal - -class InteractionServer(): - - def __init__(self): - - self.dialogflow_process_audio = rospy.ServiceProxy("/dialogflow_speech/process_audio", DialogflowAudio) - self.dialogflow_process_text = rospy.ServiceProxy("/dialogflow_speech/process_text", DialogflowText) - - def speech_interaction(self, req): - - response = SpeechInteractionResponse() - - dialogflow_response = self.dialogflow_process_audio(req.task, req.action) - - response.result, response.success = dialogflow_response.result, dialogflow_response.success - - return response - - def text_interaction(self, req): - - response = SpeechInteractionResponse() - - dialogflow_response = self.dialogflow_process_text(req.task, req.action, req.query_text) - - response.result, response.success = dialogflow_response.result, dialogflow_response.success - - return response - - def speak(self, text): - pass - -if __name__ == "__main__": - rospy.init_node("lasr_interaction_server") - interaction_server = InteractionServer() - rospy.spin() \ No newline at end of file diff --git a/legacy/lasr_interaction_server/srv/SpeechInteraction.srv b/legacy/lasr_interaction_server/srv/SpeechInteraction.srv deleted file mode 100644 index bdca858cf..000000000 --- a/legacy/lasr_interaction_server/srv/SpeechInteraction.srv +++ /dev/null @@ -1,5 +0,0 @@ -std_msgs/String task -std_msgs/String action ---- -std_msgs/String result -std_msgs/Bool success \ No newline at end of file diff --git a/legacy/lasr_interaction_server/srv/TextInteraction.srv b/legacy/lasr_interaction_server/srv/TextInteraction.srv deleted file mode 100644 index 2b9497c61..000000000 --- a/legacy/lasr_interaction_server/srv/TextInteraction.srv +++ /dev/null @@ -1,6 +0,0 @@ -std_msgs/String task -std_msgs/String action -std_msgs/String query_text ---- -std_msgs/String result -std_msgs/Bool success \ No newline at end of file diff --git a/legacy/lasr_navigate_to_known_person/README.md b/legacy/lasr_navigate_to_known_person/README.md deleted file mode 100644 index 00062d574..000000000 --- a/legacy/lasr_navigate_to_known_person/README.md +++ /dev/null @@ -1,48 +0,0 @@ -# lasr_navigate_to_known_person - -The lasr_navigate_to_known_person package - -This package is maintained by: -- [elisabeth](mailto:elisabeth@todo.todo) - -## Prerequisites - -This package depends on the following ROS packages: -- catkin (buildtool) -- rospy (build) -- rospy (exec) - -Ask the package maintainer to write or create a blank `doc/PREREQUISITES.md` for their package! - -## Usage - -Ask the package maintainer to write a `doc/USAGE.md` for their package! - -## Example - -Ask the package maintainer to write a `doc/EXAMPLE.md` for their package! 
- -## Technical Overview - -Ask the package maintainer to write a `doc/TECHNICAL.md` for their package! - -## ROS Definitions - -### Launch Files - -#### `navigate_to_known_person` - -No description provided. - - -### Messages - -This package has no messages. - -### Services - -This package has no services. - -### Actions - -This package has no actions. diff --git a/legacy/lasr_navigate_to_known_person/launch/navigate_to_known_person.launch b/legacy/lasr_navigate_to_known_person/launch/navigate_to_known_person.launch deleted file mode 100644 index a20c8b2b8..000000000 --- a/legacy/lasr_navigate_to_known_person/launch/navigate_to_known_person.launch +++ /dev/null @@ -1,5 +0,0 @@ - - - - - \ No newline at end of file diff --git a/legacy/lasr_navigate_to_known_person/setup.py b/legacy/lasr_navigate_to_known_person/setup.py deleted file mode 100644 index 24e6a3798..000000000 --- a/legacy/lasr_navigate_to_known_person/setup.py +++ /dev/null @@ -1,11 +0,0 @@ -#!/usr/bin/env python3 - -from distutils.core import setup -from catkin_pkg.python_setup import generate_distutils_setup - -setup_args = generate_distutils_setup( - packages=['lasr_navigate_to_known_person'], - package_dir={'': 'src'} -) - -setup(**setup_args) \ No newline at end of file diff --git a/legacy/lasr_navigate_to_known_person/src/lasr_navigate_to_known_person/navigate_to_known_person.py b/legacy/lasr_navigate_to_known_person/src/lasr_navigate_to_known_person/navigate_to_known_person.py deleted file mode 100755 index c37b2a21b..000000000 --- a/legacy/lasr_navigate_to_known_person/src/lasr_navigate_to_known_person/navigate_to_known_person.py +++ /dev/null @@ -1,52 +0,0 @@ -#!/usr/bin/env python3 -import rospy -import smach -from lasr_navigate_to_known_person.states import GoTo, Scan -from tiago_controllers.controllers import ReachToRadius, HeadController - - -class ScanAndGoTo(smach.StateMachine): - def __init__(self, base_controller: ReachToRadius, head_controller: HeadController): - smach.StateMachine.__init__(self, outcomes=['succeeded', 'failed']) - - self.base_controller = base_controller - self.head_controller = head_controller - self.userdata.prev = 'Scan' - - with self: - smach.StateMachine.add('SCAN_ROOM', Scan(self.head_controller), - transitions={ - 'succeeded': 'GO_TO_HOST', - 'failed': 'failed' - }, - remapping={ - 'location': 'location', - 'prev': 'prev' - }) - - smach.StateMachine.add('GO_TO_HOST', GoTo(self.base_controller), - transitions={ - 'succeeded': 'succeeded', - 'failed': 'failed' - }, - remapping={ - 'location': 'location', - 'prev': 'prev' - }) - - -if __name__ == '__main__': - rospy.init_node("navigate_to_person_node", anonymous=True) - base_controller = ReachToRadius() - head_controller = HeadController() - sm = smach.StateMachine(outcomes=['success', 'no_people']) - - with sm: - smach.StateMachine.add('SCAN_AND_GO_TO', - ScanAndGoTo(base_controller, head_controller), - transitions={'succeeded': 'success', - 'failed': 'SCAN_AND_GO_TO'}) - - outcome = sm.execute() - rospy.loginfo('I have completed execution with outcome: ') - rospy.loginfo(outcome) diff --git a/legacy/lasr_navigate_to_known_person/src/lasr_navigate_to_known_person/states/__init__.py b/legacy/lasr_navigate_to_known_person/src/lasr_navigate_to_known_person/states/__init__.py deleted file mode 100644 index a6752cb3c..000000000 --- a/legacy/lasr_navigate_to_known_person/src/lasr_navigate_to_known_person/states/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -from .go_to_state import GoTo -from .scan_state import Scan \ No newline at end of file 
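The `ScanAndGoTo` machine above hands the detected pose from the scan state to the navigation state through smach userdata (`location`), with `prev` recording which state ran last. A stripped-down sketch of that hand-off pattern, with dummy states standing in for `Scan` and `GoTo`:

```python
#!/usr/bin/env python3
# Minimal sketch of the smach userdata hand-off used by ScanAndGoTo:
# the first state writes 'location', the second state reads it.
import smach


class FakeScan(smach.State):
    def __init__(self):
        smach.State.__init__(self, outcomes=["succeeded", "failed"], output_keys=["location"])

    def execute(self, userdata):
        userdata.location = (1.0, 2.0)  # stand-in for the detected person's pose
        return "succeeded"


class FakeGoTo(smach.State):
    def __init__(self):
        smach.State.__init__(self, outcomes=["succeeded", "failed"], input_keys=["location"])

    def execute(self, userdata):
        print("navigating to", userdata.location)
        return "succeeded"


sm = smach.StateMachine(outcomes=["succeeded", "failed"])
with sm:
    smach.StateMachine.add("SCAN_ROOM", FakeScan(),
                           transitions={"succeeded": "GO_TO_HOST", "failed": "failed"})
    smach.StateMachine.add("GO_TO_HOST", FakeGoTo(),
                           transitions={"succeeded": "succeeded", "failed": "failed"})

print(sm.execute())
```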
diff --git a/legacy/lasr_navigate_to_known_person/src/lasr_navigate_to_known_person/states/go_to_state.py b/legacy/lasr_navigate_to_known_person/src/lasr_navigate_to_known_person/states/go_to_state.py deleted file mode 100644 index 3121966c2..000000000 --- a/legacy/lasr_navigate_to_known_person/src/lasr_navigate_to_known_person/states/go_to_state.py +++ /dev/null @@ -1,35 +0,0 @@ -#!/usr/bin/env python3 -import rospy -import smach -from tiago_controllers.utils import activate_robot_navigation -from tiago_controllers.controllers import ReachToRadius - -class GoTo(smach.State): - def __init__(self, base_controller: ReachToRadius): - smach.State.__init__(self, outcomes=['succeeded', 'failed'], input_keys=['location', 'prev'], - output_keys=['prev']) - self.base_controller = base_controller - - def execute(self, userdata): - # activate navigation on the robot - activate_robot_navigation(True) - self.goal = userdata.location - - # go to radius if previous state was scanning the room to detect the host - if userdata.prev == 'Scan': - rospy.sleep(2.) # give it a 2 seconds for obstacle aware nav - success = self.base_controller.sync_to_radius(self.goal.position.x, self.goal.position.y, radius=2.0, - tol=0.2) - - if success: - success = self.base_controller.sync_face_to(self.goal.position.x, self.goal.position.y) - else: - success = self.base_controller.sync_to_pose(self.goal) - - userdata.prev = 'GoTo' - if success: - rospy.loginfo(f"sending pose goal {self.goal.position} to base_controller...") - return 'succeeded' - else: - rospy.loginfo(f"base controller failed to go to position {self.goal.position}") - return 'failed' diff --git a/legacy/lasr_navigate_to_known_person/src/lasr_navigate_to_known_person/states/scan_state.py b/legacy/lasr_navigate_to_known_person/src/lasr_navigate_to_known_person/states/scan_state.py deleted file mode 100644 index c368dbd99..000000000 --- a/legacy/lasr_navigate_to_known_person/src/lasr_navigate_to_known_person/states/scan_state.py +++ /dev/null @@ -1,45 +0,0 @@ -#!/usr/bin/env python3 - -import rospy -import smach -from geometry_msgs.msg import Pose, Quaternion -from sensor_msgs.msg import PointCloud2 -from tiago_controllers.utils import activate_robot_navigation -from face_detection.srv import FaceDetectionPCL - - -class Scan(smach.State): - def __init__(self, head_controller): - smach.State.__init__(self, outcomes=['succeeded', 'failed'], input_keys=['prev'], - output_keys=['location', 'prev']) - self.head_controller = head_controller - rospy.wait_for_service("pcl_face_detection_server") - self.face_detection = rospy.ServiceProxy("pcl_face_detection_server", FaceDetectionPCL) - - def execute(self, userdata): - userdata.prev = 'Scan' - - def rotate_head(direction): - self.head_controller.sync_reach_to(direction, 0.0, velocities=[0.1, 0.]) - - # deactivate navigation on the robot to move the head around - activate_robot_navigation(False) - - # run detection at most 4 times - direction = 0.2 - for _ in range(4): - direction *= -1 - rotate_head(direction) - print(f'rotating head in direction {direction}') - pcl_msg = rospy.wait_for_message("/xtion/depth_registered/points", PointCloud2) - - resp = self.face_detection(pcl_msg) - print(resp, "my response") - if resp.detections: - for detection in resp.detections: - # print(detection.name) - # if detection.name == "female_doll": - print(detection.centroid.point) - userdata.location = Pose(detection.centroid.point, Quaternion(0, 0, 0, 1)) - return 'succeeded' - return 'failed' diff --git 
a/legacy/lasr_object_detection_yolo/CMakeLists.txt b/legacy/lasr_object_detection_yolo/CMakeLists.txt deleted file mode 100644 index 6abbae500..000000000 --- a/legacy/lasr_object_detection_yolo/CMakeLists.txt +++ /dev/null @@ -1,208 +0,0 @@ -cmake_minimum_required(VERSION 3.0.2) -project(lasr_object_detection_yolo) - -## Compile as C++11, supported in ROS Kinetic and newer -# add_compile_options(-std=c++11) - -## Find catkin macros and libraries -## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz) -## is used, also find other catkin packages -find_package(catkin REQUIRED COMPONENTS - rospy - std_msgs - sensor_msgs - message_runtime - message_generation - lasr_vision_msgs -) - -## System dependencies are found with CMake's conventions -# find_package(Boost REQUIRED COMPONENTS system) - -## Uncomment this if the package has a setup.py. This macro ensures -## modules and global scripts declared therein get installed -## See http://ros.org/doc/api/catkin/html/user_guide/setup_dot_py.html -catkin_python_setup() - -################################################ -## Declare ROS messages, services and actions ## -################################################ - -## To declare and build messages, services or actions from within this -## package, follow these steps: -## * Let MSG_DEP_SET be the set of packages whose message types you use in -## your messages/services/actions (e.g. std_msgs, actionlib_msgs, ...). -## * In the file package.xml: -## * add a build_depend tag for "message_generation" -## * add a build_depend and a exec_depend tag for each package in MSG_DEP_SET -## * If MSG_DEP_SET isn't empty the following dependency has been pulled in -## but can be declared for certainty nonetheless: -## * add a exec_depend tag for "message_runtime" -## * In this file (CMakeLists.txt): -## * add "message_generation" and every package in MSG_DEP_SET to -## find_package(catkin REQUIRED COMPONENTS ...) -## * add "message_runtime" and every package in MSG_DEP_SET to -## catkin_package(CATKIN_DEPENDS ...) -## * uncomment the add_*_files sections below as needed -## and list every .msg/.srv/.action file to be processed -## * uncomment the generate_messages entry below -## * add every package in MSG_DEP_SET to generate_messages(DEPENDENCIES ...) - -## Generate messages in the 'msg' folder -# add_message_files( -# FILES -# Detection.msg -# ) - -## Generate services in the 'srv' folder -add_service_files( - FILES - YoloDetection.srv -) - -## Generate actions in the 'action' folder -# add_action_files( -# FILES -# Action1.action -# Action2.action -# ) - -## Generate added messages and services with any dependencies listed here -generate_messages( - DEPENDENCIES - std_msgs - sensor_msgs - lasr_vision_msgs -) - -################################################ -## Declare ROS dynamic reconfigure parameters ## -################################################ - -## To declare and build dynamic reconfigure parameters within this -## package, follow these steps: -## * In the file package.xml: -## * add a build_depend and a exec_depend tag for "dynamic_reconfigure" -## * In this file (CMakeLists.txt): -## * add "dynamic_reconfigure" to -## find_package(catkin REQUIRED COMPONENTS ...) 
-## * uncomment the "generate_dynamic_reconfigure_options" section below -## and list every .cfg file to be processed - -## Generate dynamic reconfigure parameters in the 'cfg' folder -# generate_dynamic_reconfigure_options( -# cfg/DynReconf1.cfg -# cfg/DynReconf2.cfg -# ) - -################################### -## catkin specific configuration ## -################################### -## The catkin_package macro generates cmake config files for your package -## Declare things to be passed to dependent projects -## INCLUDE_DIRS: uncomment this if your package contains header files -## LIBRARIES: libraries you create in this project that dependent projects also need -## CATKIN_DEPENDS: catkin_packages dependent projects also need -## DEPENDS: system dependencies of this project that dependent projects also need -catkin_package( -# INCLUDE_DIRS include -# LIBRARIES lasr_object_detection_yolo -# CATKIN_DEPENDS rospy std_msgs -# DEPENDS system_lib -) - -########### -## Build ## -########### - -## Specify additional locations of header files -## Your package locations should be listed before other locations -include_directories( -# include - ${catkin_INCLUDE_DIRS} -) - -## Declare a C++ library -# add_library(${PROJECT_NAME} -# src/${PROJECT_NAME}/lasr_object_detection_yolo.cpp -# ) - -## Add cmake target dependencies of the library -## as an example, code may need to be generated before libraries -## either from message generation or dynamic reconfigure -# add_dependencies(${PROJECT_NAME} ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS}) - -## Declare a C++ executable -## With catkin_make all packages are built within a single CMake context -## The recommended prefix ensures that target names across packages don't collide -# add_executable(${PROJECT_NAME}_node src/lasr_object_detection_yolo_node.cpp) - -## Rename C++ executable without prefix -## The above recommended prefix causes long target names, the following renames the -## target back to the shorter version for ease of user use -## e.g. "rosrun someones_pkg node" instead of "rosrun someones_pkg someones_pkg_node" -# set_target_properties(${PROJECT_NAME}_node PROPERTIES OUTPUT_NAME node PREFIX "") - -## Add cmake target dependencies of the executable -## same as for the library above -# add_dependencies(${PROJECT_NAME}_node ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS}) - -## Specify libraries to link a library or executable target against -# target_link_libraries(${PROJECT_NAME}_node -# ${catkin_LIBRARIES} -# ) - -############# -## Install ## -############# - -# all install targets should use catkin DESTINATION variables -# See http://ros.org/doc/api/catkin/html/adv_user_guide/variables.html - -## Mark executable scripts (Python etc.) 
for installation -## in contrast to setup.py, you can choose the destination -# catkin_install_python(PROGRAMS -# scripts/my_python_script -# DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} -# ) - -## Mark executables for installation -## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_executables.html -# install(TARGETS ${PROJECT_NAME}_node -# RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} -# ) - -## Mark libraries for installation -## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_libraries.html -# install(TARGETS ${PROJECT_NAME} -# ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} -# LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} -# RUNTIME DESTINATION ${CATKIN_GLOBAL_BIN_DESTINATION} -# ) - -## Mark cpp header files for installation -# install(DIRECTORY include/${PROJECT_NAME}/ -# DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION} -# FILES_MATCHING PATTERN "*.h" -# PATTERN ".svn" EXCLUDE -# ) - -## Mark other files for installation (e.g. launch and bag files, etc.) -# install(FILES -# # myfile1 -# # myfile2 -# DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION} -# ) - -############# -## Testing ## -############# - -## Add gtest based cpp test target and link libraries -# catkin_add_gtest(${PROJECT_NAME}-test test/test_lasr_object_detection_yolo.cpp) -# if(TARGET ${PROJECT_NAME}-test) -# target_link_libraries(${PROJECT_NAME}-test ${PROJECT_NAME}) -# endif() - -## Add folders to be run by python nosetests -# catkin_add_nosetests(test) diff --git a/legacy/lasr_object_detection_yolo/README.md b/legacy/lasr_object_detection_yolo/README.md deleted file mode 100644 index 8420085ec..000000000 --- a/legacy/lasr_object_detection_yolo/README.md +++ /dev/null @@ -1,64 +0,0 @@ -# lasr_object_detection_yolo - -The lasr_object_detection_yolo package - -This package is maintained by: -- [jared](mailto:jared@todo.todo) - -## Prerequisites - -This package depends on the following ROS packages: -- catkin (buildtool) -- rospy (build) -- std_msgs (build) -- rospy (exec) -- std_msgs (exec) -- lasr_vision_msgs - -Ask the package maintainer to write or create a blank `doc/PREREQUISITES.md` for their package! - -## Usage - -Ask the package maintainer to write a `doc/USAGE.md` for their package! - -## Example - -Ask the package maintainer to write a `doc/EXAMPLE.md` for their package! - -## Technical Overview - -Ask the package maintainer to write a `doc/TECHNICAL.md` for their package! - -## ROS Definitions - -### Launch Files - -This package has no launch files. - -### Messages - -This package has no messages. - -### Services - -#### `YoloDetection` - -Request - -| Field | Type | Description | -|:-:|:-:|---| -| image_raw | sensor_msgs/Image | | -| dataset | string | | -| confidence | float32 | | -| nms | float32 | | - -Response - -| Field | Type | Description | -|:-:|:-:|---| -| detected_objects | lasr_vision_msgs/Detection[] | | - - -### Actions - -This package has no actions. 
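The `YoloDetection` service above takes a raw image plus a dataset name, confidence and NMS thresholds, and returns `lasr_vision_msgs/Detection` objects. A minimal client sketch, assuming an illustrative service name, camera topic and dataset name (none of these are fixed in this diff):

```python
#!/usr/bin/env python3
# Minimal client sketch for the YoloDetection service described above.
# Service name, camera topic and dataset name are placeholders, not taken from this diff.
import rospy
from sensor_msgs.msg import Image
from lasr_object_detection_yolo.srv import YoloDetection

rospy.init_node("yolo_detection_client")
SERVICE = "/yolo_detection"  # assumed name
rospy.wait_for_service(SERVICE)
detect = rospy.ServiceProxy(SERVICE, YoloDetection)

img = rospy.wait_for_message("/camera/image_raw", Image)  # placeholder topic
resp = detect(img, "coco", 0.5, 0.3)  # image_raw, dataset, confidence, nms
for detection in resp.detected_objects:
    rospy.loginfo("detected: %s", detection)
```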
diff --git a/legacy/lasr_object_detection_yolo/package.xml b/legacy/lasr_object_detection_yolo/package.xml deleted file mode 100644 index b5b188445..000000000 --- a/legacy/lasr_object_detection_yolo/package.xml +++ /dev/null @@ -1,66 +0,0 @@ - - - lasr_object_detection_yolo - 0.0.0 - The lasr_object_detection_yolo package - - - - - jared - - - - - - TODO - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - catkin - rospy - std_msgs - rospy - std_msgs - rospy - std_msgs - lasr_vision_msgs - - - - - - - - \ No newline at end of file diff --git a/legacy/lasr_object_detection_yolo/setup.py b/legacy/lasr_object_detection_yolo/setup.py deleted file mode 100644 index 151d41685..000000000 --- a/legacy/lasr_object_detection_yolo/setup.py +++ /dev/null @@ -1,12 +0,0 @@ - -#!/usr/bin/env python3 - -from distutils.core import setup -from catkin_pkg.python_setup import generate_distutils_setup - -setup_args = generate_distutils_setup( - packages=['lasr_object_detection_yolo'], - package_dir={'': 'src'} -) - -setup(**setup_args) \ No newline at end of file diff --git a/legacy/lasr_object_detection_yolo/src/lasr_object_detection_yolo/darknet_pytorch/config.py b/legacy/lasr_object_detection_yolo/src/lasr_object_detection_yolo/darknet_pytorch/config.py deleted file mode 100644 index 197160634..000000000 --- a/legacy/lasr_object_detection_yolo/src/lasr_object_detection_yolo/darknet_pytorch/config.py +++ /dev/null @@ -1,100 +0,0 @@ -import torch -from darknet_pytorch.torch_utils import convert2cpu - - -def parse_cfg(cfgfile): - blocks = [] - fp = open(cfgfile, 'r') - block = None - line = fp.readline() - while line != '': - line = line.rstrip() - if line == '' or line[0] == '#': - line = fp.readline() - continue - elif line[0] == '[': - if block: - blocks.append(block) - block = dict() - block['type'] = line.lstrip('[').rstrip(']') - # set default value - if block['type'] == 'convolutional': - block['batch_normalize'] = 0 - else: - key, value = line.split('=') - key = key.strip() - if key == 'type': - key = '_type' - value = value.strip() - block[key] = value - line = fp.readline() - - if block: - blocks.append(block) - fp.close() - return blocks - - -def load_conv(buf, start, conv_model): - num_w = conv_model.weight.numel() - num_b = conv_model.bias.numel() - conv_model.bias.data.copy_(torch.from_numpy(buf[start:start + num_b])); - start = start + num_b - conv_model.weight.data.copy_(torch.from_numpy(buf[start:start + num_w]).reshape(conv_model.weight.data.shape)); - start = start + num_w - return start - - -def save_conv(fp, conv_model): - if conv_model.bias.is_cuda: - convert2cpu(conv_model.bias.data).numpy().tofile(fp) - convert2cpu(conv_model.weight.data).numpy().tofile(fp) - else: - conv_model.bias.data.numpy().tofile(fp) - conv_model.weight.data.numpy().tofile(fp) - - -def load_conv_bn(buf, start, conv_model, bn_model): - num_w = conv_model.weight.numel() - num_b = bn_model.bias.numel() - bn_model.bias.data.copy_(torch.from_numpy(buf[start:start + num_b])); - start = start + num_b - bn_model.weight.data.copy_(torch.from_numpy(buf[start:start + num_b])); - start = start + num_b - bn_model.running_mean.copy_(torch.from_numpy(buf[start:start + num_b])); - start = start + num_b - bn_model.running_var.copy_(torch.from_numpy(buf[start:start + num_b])); - start = start + num_b - conv_model.weight.data.copy_(torch.from_numpy(buf[start:start + num_w]).reshape(conv_model.weight.data.shape)); - start = start + num_w - return start - - -def save_conv_bn(fp, conv_model, bn_model): - if 
bn_model.bias.is_cuda: - convert2cpu(bn_model.bias.data).numpy().tofile(fp) - convert2cpu(bn_model.weight.data).numpy().tofile(fp) - convert2cpu(bn_model.running_mean).numpy().tofile(fp) - convert2cpu(bn_model.running_var).numpy().tofile(fp) - convert2cpu(conv_model.weight.data).numpy().tofile(fp) - else: - bn_model.bias.data.numpy().tofile(fp) - bn_model.weight.data.numpy().tofile(fp) - bn_model.running_mean.numpy().tofile(fp) - bn_model.running_var.numpy().tofile(fp) - conv_model.weight.data.numpy().tofile(fp) - - -def load_fc(buf, start, fc_model): - num_w = fc_model.weight.numel() - num_b = fc_model.bias.numel() - fc_model.bias.data.copy_(torch.from_numpy(buf[start:start + num_b])); - start = start + num_b - fc_model.weight.data.copy_(torch.from_numpy(buf[start:start + num_w])); - start = start + num_w - return start - - -def save_fc(fp, fc_model): - fc_model.bias.data.numpy().tofile(fp) - fc_model.weight.data.numpy().tofile(fp) diff --git a/legacy/lasr_object_detection_yolo/src/lasr_object_detection_yolo/darknet_pytorch/darknet.py b/legacy/lasr_object_detection_yolo/src/lasr_object_detection_yolo/darknet_pytorch/darknet.py deleted file mode 100644 index d3cab61f6..000000000 --- a/legacy/lasr_object_detection_yolo/src/lasr_object_detection_yolo/darknet_pytorch/darknet.py +++ /dev/null @@ -1,480 +0,0 @@ -import torch.nn as nn -import torch.nn.functional as F -import numpy as np -from darknet_pytorch.region_loss import RegionLoss -from darknet_pytorch.yolo_layer import YoloLayer -from darknet_pytorch.config import * -from darknet_pytorch.torch_utils import * - - -class Mish(torch.nn.Module): - def __init__(self): - super().__init__() - - def forward(self, x): - x = x * (torch.tanh(torch.nn.functional.softplus(x))) - return x - - -class MaxPoolDark(nn.Module): - def __init__(self, size=2, stride=1): - super(MaxPoolDark, self).__init__() - self.size = size - self.stride = stride - - def forward(self, x): - ''' - darknet output_size = (input_size + p - k) / s +1 - p : padding = k - 1 - k : size - s : stride - torch output_size = (input_size + 2*p -k) / s +1 - p : padding = k//2 - ''' - p = self.size // 2 - if ((x.shape[2] - 1) // self.stride) != ((x.shape[2] + 2 * p - self.size) // self.stride): - padding1 = (self.size - 1) // 2 - padding2 = padding1 + 1 - else: - padding1 = (self.size - 1) // 2 - padding2 = padding1 - if ((x.shape[3] - 1) // self.stride) != ((x.shape[3] + 2 * p - self.size) // self.stride): - padding3 = (self.size - 1) // 2 - padding4 = padding3 + 1 - else: - padding3 = (self.size - 1) // 2 - padding4 = padding3 - x = F.max_pool2d(F.pad(x, (padding3, padding4, padding1, padding2), mode='replicate'), - self.size, stride=self.stride) - return x - - -class Upsample_expand(nn.Module): - def __init__(self, stride=2): - super(Upsample_expand, self).__init__() - self.stride = stride - - def forward(self, x): - assert (x.data.dim() == 4) - - x = x.view(x.size(0), x.size(1), x.size(2), 1, x.size(3), 1).\ - expand(x.size(0), x.size(1), x.size(2), self.stride, x.size(3), self.stride).contiguous().\ - view(x.size(0), x.size(1), x.size(2) * self.stride, x.size(3) * self.stride) - - return x - - -class Upsample_interpolate(nn.Module): - def __init__(self, stride): - super(Upsample_interpolate, self).__init__() - self.stride = stride - - def forward(self, x): - assert (x.data.dim() == 4) - - out = F.interpolate(x, size=(x.size(2) * self.stride, x.size(3) * self.stride), mode='nearest') - return out - - -class Reorg(nn.Module): - def __init__(self, stride=2): - super(Reorg, 
self).__init__() - self.stride = stride - - def forward(self, x): - stride = self.stride - assert (x.data.dim() == 4) - B = x.data.size(0) - C = x.data.size(1) - H = x.data.size(2) - W = x.data.size(3) - assert (H % stride == 0) - assert (W % stride == 0) - ws = stride - hs = stride - x = x.view(B, C, H / hs, hs, W / ws, ws).transpose(3, 4).contiguous() - x = x.view(B, C, H / hs * W / ws, hs * ws).transpose(2, 3).contiguous() - x = x.view(B, C, hs * ws, H / hs, W / ws).transpose(1, 2).contiguous() - x = x.view(B, hs * ws * C, H / hs, W / ws) - return x - - -class GlobalAvgPool2d(nn.Module): - def __init__(self): - super(GlobalAvgPool2d, self).__init__() - - def forward(self, x): - N = x.data.size(0) - C = x.data.size(1) - H = x.data.size(2) - W = x.data.size(3) - x = F.avg_pool2d(x, (H, W)) - x = x.view(N, C) - return x - - -# for route, shortcut and sam -class EmptyModule(nn.Module): - def __init__(self): - super(EmptyModule, self).__init__() - - def forward(self, x): - return x - - -# support route shortcut and reorg -class Darknet(nn.Module): - def __init__(self, cfgfile, inference=False): - super(Darknet, self).__init__() - self.inference = inference - self.training = not self.inference - - self.blocks = parse_cfg(cfgfile) - self.width = int(self.blocks[0]['width']) - self.height = int(self.blocks[0]['height']) - - self.models = self.create_network(self.blocks) # merge conv, bn,leaky - self.loss = self.models[len(self.models) - 1] - - if self.blocks[(len(self.blocks) - 1)]['type'] == 'region': - self.anchors = self.loss.anchors - self.num_anchors = self.loss.num_anchors - self.anchor_step = self.loss.anchor_step - self.num_classes = self.loss.num_classes - - self.header = torch.IntTensor([0, 0, 0, 0]) - self.seen = 0 - - def forward(self, x): - ind = -2 - self.loss = None - outputs = dict() - out_boxes = [] - for block in self.blocks: - ind = ind + 1 - # if ind > 0: - # return x - - if block['type'] == 'net': - continue - elif block['type'] in ['convolutional', 'maxpool', 'reorg', 'upsample', 'avgpool', 'softmax', 'connected']: - x = self.models[ind](x) - outputs[ind] = x - elif block['type'] == 'route': - layers = block['layers'].split(',') - layers = [int(i) if int(i) > 0 else int(i) + ind for i in layers] - if len(layers) == 1: - if 'groups' not in block.keys() or int(block['groups']) == 1: - x = outputs[layers[0]] - outputs[ind] = x - else: - groups = int(block['groups']) - group_id = int(block['group_id']) - _, b, _, _ = outputs[layers[0]].shape - x = outputs[layers[0]][:, b // groups * group_id:b // groups * (group_id + 1)] - outputs[ind] = x - elif len(layers) == 2: - x1 = outputs[layers[0]] - x2 = outputs[layers[1]] - x = torch.cat((x1, x2), 1) - outputs[ind] = x - elif len(layers) == 4: - x1 = outputs[layers[0]] - x2 = outputs[layers[1]] - x3 = outputs[layers[2]] - x4 = outputs[layers[3]] - x = torch.cat((x1, x2, x3, x4), 1) - outputs[ind] = x - else: - print("rounte number > 2 ,is {}".format(len(layers))) - - elif block['type'] == 'shortcut': - from_layer = int(block['from']) - activation = block['activation'] - from_layer = from_layer if from_layer > 0 else from_layer + ind - x1 = outputs[from_layer] - x2 = outputs[ind - 1] - x = x1 + x2 - if activation == 'leaky': - x = F.leaky_relu(x, 0.1, inplace=True) - elif activation == 'relu': - x = F.relu(x, inplace=True) - outputs[ind] = x - elif block['type'] == 'sam': - from_layer = int(block['from']) - from_layer = from_layer if from_layer > 0 else from_layer + ind - x1 = outputs[from_layer] - x2 = outputs[ind - 1] - x = x1 * x2 
- outputs[ind] = x - elif block['type'] == 'region': - continue - if self.loss: - self.loss = self.loss + self.models[ind](x) - else: - self.loss = self.models[ind](x) - outputs[ind] = None - elif block['type'] == 'yolo': - # if self.training: - # pass - # else: - # boxes = self.models[ind](x) - # out_boxes.append(boxes) - boxes = self.models[ind](x) - out_boxes.append(boxes) - elif block['type'] == 'cost': - continue - else: - print('unknown type %s' % (block['type'])) - - if self.training: - return out_boxes - else: - return get_region_boxes(out_boxes) - - def print_network(self): - print_cfg(self.blocks) - - def create_network(self, blocks): - models = nn.ModuleList() - - prev_filters = 3 - out_filters = [] - prev_stride = 1 - out_strides = [] - conv_id = 0 - for block in blocks: - if block['type'] == 'net': - prev_filters = int(block['channels']) - continue - elif block['type'] == 'convolutional': - conv_id = conv_id + 1 - batch_normalize = int(block['batch_normalize']) - filters = int(block['filters']) - kernel_size = int(block['size']) - stride = int(block['stride']) - is_pad = int(block['pad']) - pad = (kernel_size - 1) // 2 if is_pad else 0 - activation = block['activation'] - model = nn.Sequential() - if batch_normalize: - model.add_module('conv{0}'.format(conv_id), - nn.Conv2d(prev_filters, filters, kernel_size, stride, pad, bias=False)) - model.add_module('bn{0}'.format(conv_id), nn.BatchNorm2d(filters)) - # model.add_module('bn{0}'.format(conv_id), BN2d(filters)) - else: - model.add_module('conv{0}'.format(conv_id), - nn.Conv2d(prev_filters, filters, kernel_size, stride, pad)) - if activation == 'leaky': - model.add_module('leaky{0}'.format(conv_id), nn.LeakyReLU(0.1, inplace=True)) - elif activation == 'relu': - model.add_module('relu{0}'.format(conv_id), nn.ReLU(inplace=True)) - elif activation == 'mish': - model.add_module('mish{0}'.format(conv_id), Mish()) - elif activation == 'linear': - model.add_module('linear{0}'.format(conv_id), nn.Identity()) - elif activation == 'logistic': - model.add_module('sigmoid{0}'.format(conv_id), nn.Sigmoid()) - else: - print("No convolutional activation named {}".format(activation)) - - prev_filters = filters - out_filters.append(prev_filters) - prev_stride = stride * prev_stride - out_strides.append(prev_stride) - models.append(model) - elif block['type'] == 'maxpool': - pool_size = int(block['size']) - stride = int(block['stride']) - if stride == 1 and pool_size % 2: - # You can use Maxpooldark instead, here is convenient to convert onnx. - # Example: [maxpool] size=3 stride=1 - model = nn.MaxPool2d(kernel_size=pool_size, stride=stride, padding=pool_size // 2) - elif stride == pool_size: - # You can use Maxpooldark instead, here is convenient to convert onnx. 
- # Example: [maxpool] size=2 stride=2 - model = nn.MaxPool2d(kernel_size=pool_size, stride=stride, padding=0) - else: - model = MaxPoolDark(pool_size, stride) - out_filters.append(prev_filters) - prev_stride = stride * prev_stride - out_strides.append(prev_stride) - models.append(model) - elif block['type'] == 'avgpool': - model = GlobalAvgPool2d() - out_filters.append(prev_filters) - models.append(model) - elif block['type'] == 'softmax': - model = nn.Softmax() - out_strides.append(prev_stride) - out_filters.append(prev_filters) - models.append(model) - elif block['type'] == 'cost': - if block['_type'] == 'sse': - model = nn.MSELoss(reduction='mean') - elif block['_type'] == 'L1': - model = nn.L1Loss(reduction='mean') - elif block['_type'] == 'smooth': - model = nn.SmoothL1Loss(reduction='mean') - out_filters.append(1) - out_strides.append(prev_stride) - models.append(model) - elif block['type'] == 'reorg': - stride = int(block['stride']) - prev_filters = stride * stride * prev_filters - out_filters.append(prev_filters) - prev_stride = prev_stride * stride - out_strides.append(prev_stride) - models.append(Reorg(stride)) - elif block['type'] == 'upsample': - stride = int(block['stride']) - out_filters.append(prev_filters) - prev_stride = prev_stride // stride - out_strides.append(prev_stride) - - models.append(Upsample_expand(stride)) - # models.append(Upsample_interpolate(stride)) - - elif block['type'] == 'route': - layers = block['layers'].split(',') - ind = len(models) - layers = [int(i) if int(i) > 0 else int(i) + ind for i in layers] - if len(layers) == 1: - if 'groups' not in block.keys() or int(block['groups']) == 1: - prev_filters = out_filters[layers[0]] - prev_stride = out_strides[layers[0]] - else: - prev_filters = out_filters[layers[0]] // int(block['groups']) - prev_stride = out_strides[layers[0]] // int(block['groups']) - elif len(layers) == 2: - assert (layers[0] == ind - 1 or layers[1] == ind - 1) - prev_filters = out_filters[layers[0]] + out_filters[layers[1]] - prev_stride = out_strides[layers[0]] - elif len(layers) == 4: - assert (layers[0] == ind - 1) - prev_filters = out_filters[layers[0]] + out_filters[layers[1]] + out_filters[layers[2]] + \ - out_filters[layers[3]] - prev_stride = out_strides[layers[0]] - else: - print("route error!!!") - - out_filters.append(prev_filters) - out_strides.append(prev_stride) - models.append(EmptyModule()) - elif block['type'] == 'shortcut': - ind = len(models) - prev_filters = out_filters[ind - 1] - out_filters.append(prev_filters) - prev_stride = out_strides[ind - 1] - out_strides.append(prev_stride) - models.append(EmptyModule()) - elif block['type'] == 'sam': - ind = len(models) - prev_filters = out_filters[ind - 1] - out_filters.append(prev_filters) - prev_stride = out_strides[ind - 1] - out_strides.append(prev_stride) - models.append(EmptyModule()) - elif block['type'] == 'connected': - filters = int(block['output']) - if block['activation'] == 'linear': - model = nn.Linear(prev_filters, filters) - elif block['activation'] == 'leaky': - model = nn.Sequential( - nn.Linear(prev_filters, filters), - nn.LeakyReLU(0.1, inplace=True)) - elif block['activation'] == 'relu': - model = nn.Sequential( - nn.Linear(prev_filters, filters), - nn.ReLU(inplace=True)) - prev_filters = filters - out_filters.append(prev_filters) - out_strides.append(prev_stride) - models.append(model) - elif block['type'] == 'region': - loss = RegionLoss() - anchors = block['anchors'].split(',') - loss.anchors = [float(i) for i in anchors] - loss.num_classes = 
int(block['classes']) - loss.num_anchors = int(block['num']) - loss.anchor_step = len(loss.anchors) // loss.num_anchors - loss.object_scale = float(block['object_scale']) - loss.noobject_scale = float(block['noobject_scale']) - loss.class_scale = float(block['class_scale']) - loss.coord_scale = float(block['coord_scale']) - out_filters.append(prev_filters) - out_strides.append(prev_stride) - models.append(loss) - elif block['type'] == 'yolo': - yolo_layer = YoloLayer() - anchors = block['anchors'].split(',') - anchor_mask = block['mask'].split(',') - yolo_layer.anchor_mask = [int(i) for i in anchor_mask] - yolo_layer.anchors = [float(i) for i in anchors] - yolo_layer.num_classes = int(block['classes']) - self.num_classes = yolo_layer.num_classes - yolo_layer.num_anchors = int(block['num']) - yolo_layer.anchor_step = len(yolo_layer.anchors) // yolo_layer.num_anchors - yolo_layer.stride = prev_stride - yolo_layer.scale_x_y = float(block['scale_x_y']) - out_filters.append(prev_filters) - out_strides.append(prev_stride) - models.append(yolo_layer) - else: - print('unknown type %s' % (block['type'])) - - return models - - def load_weights(self, weightfile): - fp = open(weightfile, 'rb') - header = np.fromfile(fp, count=5, dtype=np.int32) - self.header = torch.from_numpy(header) - self.seen = self.header[3] - buf = np.fromfile(fp, dtype=np.float32) - fp.close() - - start = 0 - ind = -2 - for block in self.blocks: - if start >= buf.size: - break - ind = ind + 1 - if block['type'] == 'net': - continue - elif block['type'] == 'convolutional': - model = self.models[ind] - batch_normalize = int(block['batch_normalize']) - if batch_normalize: - start = load_conv_bn(buf, start, model[0], model[1]) - else: - start = load_conv(buf, start, model[0]) - elif block['type'] == 'connected': - model = self.models[ind] - if block['activation'] != 'linear': - start = load_fc(buf, start, model[0]) - else: - start = load_fc(buf, start, model) - elif block['type'] == 'maxpool': - pass - elif block['type'] == 'reorg': - pass - elif block['type'] == 'upsample': - pass - elif block['type'] == 'route': - pass - elif block['type'] == 'shortcut': - pass - elif block['type'] == 'sam': - pass - elif block['type'] == 'region': - pass - elif block['type'] == 'yolo': - pass - elif block['type'] == 'avgpool': - pass - elif block['type'] == 'softmax': - pass - elif block['type'] == 'cost': - pass - else: - print('unknown type %s' % (block['type'])) \ No newline at end of file diff --git a/legacy/lasr_object_detection_yolo/src/lasr_object_detection_yolo/darknet_pytorch/region_loss.py b/legacy/lasr_object_detection_yolo/src/lasr_object_detection_yolo/darknet_pytorch/region_loss.py deleted file mode 100644 index 75d413376..000000000 --- a/legacy/lasr_object_detection_yolo/src/lasr_object_detection_yolo/darknet_pytorch/region_loss.py +++ /dev/null @@ -1,195 +0,0 @@ -import torch.nn as nn -import torch.nn.functional as F -from darknet_pytorch.torch_utils import * - - -def build_targets(pred_boxes, target, anchors, num_anchors, num_classes, nH, nW, noobject_scale, object_scale, - sil_thresh, seen): - nB = target.size(0) - nA = num_anchors - nC = num_classes - anchor_step = len(anchors) / num_anchors - conf_mask = torch.ones(nB, nA, nH, nW) * noobject_scale - coord_mask = torch.zeros(nB, nA, nH, nW) - cls_mask = torch.zeros(nB, nA, nH, nW) - tx = torch.zeros(nB, nA, nH, nW) - ty = torch.zeros(nB, nA, nH, nW) - tw = torch.zeros(nB, nA, nH, nW) - th = torch.zeros(nB, nA, nH, nW) - tconf = torch.zeros(nB, nA, nH, nW) - tcls = 
torch.zeros(nB, nA, nH, nW) - - nAnchors = nA * nH * nW - nPixels = nH * nW - for b in range(nB): - cur_pred_boxes = pred_boxes[b * nAnchors:(b + 1) * nAnchors].t() - cur_ious = torch.zeros(nAnchors) - for t in range(50): - if target[b][t * 5 + 1] == 0: - break - gx = target[b][t * 5 + 1] * nW - gy = target[b][t * 5 + 2] * nH - gw = target[b][t * 5 + 3] * nW - gh = target[b][t * 5 + 4] * nH - cur_gt_boxes = torch.FloatTensor([gx, gy, gw, gh]).repeat(nAnchors, 1).t() - cur_ious = torch.max(cur_ious, bbox_ious(cur_pred_boxes, cur_gt_boxes, x1y1x2y2=False)) - conf_mask[b][cur_ious > sil_thresh] = 0 - if seen < 12800: - if anchor_step == 4: - tx = torch.FloatTensor(anchors).view(nA, anchor_step).index_select(1, torch.LongTensor([2])).view(1, nA, 1, - 1).repeat( - nB, 1, nH, nW) - ty = torch.FloatTensor(anchors).view(num_anchors, anchor_step).index_select(1, torch.LongTensor([2])).view( - 1, nA, 1, 1).repeat(nB, 1, nH, nW) - else: - tx.fill_(0.5) - ty.fill_(0.5) - tw.zero_() - th.zero_() - coord_mask.fill_(1) - - nGT = 0 - nCorrect = 0 - for b in range(nB): - for t in range(50): - if target[b][t * 5 + 1] == 0: - break - nGT = nGT + 1 - best_iou = 0.0 - best_n = -1 - min_dist = 10000 - gx = target[b][t * 5 + 1] * nW - gy = target[b][t * 5 + 2] * nH - gi = int(gx) - gj = int(gy) - gw = target[b][t * 5 + 3] * nW - gh = target[b][t * 5 + 4] * nH - gt_box = [0, 0, gw, gh] - for n in range(nA): - aw = anchors[anchor_step * n] - ah = anchors[anchor_step * n + 1] - anchor_box = [0, 0, aw, ah] - iou = bbox_iou(anchor_box, gt_box, x1y1x2y2=False) - if anchor_step == 4: - ax = anchors[anchor_step * n + 2] - ay = anchors[anchor_step * n + 3] - dist = pow(((gi + ax) - gx), 2) + pow(((gj + ay) - gy), 2) - if iou > best_iou: - best_iou = iou - best_n = n - elif anchor_step == 4 and iou == best_iou and dist < min_dist: - best_iou = iou - best_n = n - min_dist = dist - - gt_box = [gx, gy, gw, gh] - pred_box = pred_boxes[b * nAnchors + best_n * nPixels + gj * nW + gi] - - coord_mask[b][best_n][gj][gi] = 1 - cls_mask[b][best_n][gj][gi] = 1 - conf_mask[b][best_n][gj][gi] = object_scale - tx[b][best_n][gj][gi] = target[b][t * 5 + 1] * nW - gi - ty[b][best_n][gj][gi] = target[b][t * 5 + 2] * nH - gj - tw[b][best_n][gj][gi] = math.log(gw / anchors[anchor_step * best_n]) - th[b][best_n][gj][gi] = math.log(gh / anchors[anchor_step * best_n + 1]) - iou = bbox_iou(gt_box, pred_box, x1y1x2y2=False) # best_iou - tconf[b][best_n][gj][gi] = iou - tcls[b][best_n][gj][gi] = target[b][t * 5] - if iou > 0.5: - nCorrect = nCorrect + 1 - - return nGT, nCorrect, coord_mask, conf_mask, cls_mask, tx, ty, tw, th, tconf, tcls - - -class RegionLoss(nn.Module): - def __init__(self, num_classes=0, anchors=[], num_anchors=1): - super(RegionLoss, self).__init__() - self.num_classes = num_classes - self.anchors = anchors - self.num_anchors = num_anchors - self.anchor_step = len(anchors) / num_anchors - self.coord_scale = 1 - self.noobject_scale = 1 - self.object_scale = 5 - self.class_scale = 1 - self.thresh = 0.6 - self.seen = 0 - - def forward(self, output, target): - # output : BxAs*(4+1+num_classes)*H*W - t0 = time.time() - nB = output.data.size(0) - nA = self.num_anchors - nC = self.num_classes - nH = output.data.size(2) - nW = output.data.size(3) - - output = output.view(nB, nA, (5 + nC), nH, nW) - x = F.sigmoid(output.index_select(2, Variable(torch.cuda.LongTensor([0]))).view(nB, nA, nH, nW)) - y = F.sigmoid(output.index_select(2, Variable(torch.cuda.LongTensor([1]))).view(nB, nA, nH, nW)) - w = output.index_select(2, 
Variable(torch.cuda.LongTensor([2]))).view(nB, nA, nH, nW) - h = output.index_select(2, Variable(torch.cuda.LongTensor([3]))).view(nB, nA, nH, nW) - conf = F.sigmoid(output.index_select(2, Variable(torch.cuda.LongTensor([4]))).view(nB, nA, nH, nW)) - cls = output.index_select(2, Variable(torch.linspace(5, 5 + nC - 1, nC).long().cuda())) - cls = cls.view(nB * nA, nC, nH * nW).transpose(1, 2).contiguous().view(nB * nA * nH * nW, nC) - t1 = time.time() - - pred_boxes = torch.cuda.FloatTensor(4, nB * nA * nH * nW) - grid_x = torch.linspace(0, nW - 1, nW).repeat(nH, 1).repeat(nB * nA, 1, 1).view(nB * nA * nH * nW).cuda() - grid_y = torch.linspace(0, nH - 1, nH).repeat(nW, 1).t().repeat(nB * nA, 1, 1).view(nB * nA * nH * nW).cuda() - anchor_w = torch.Tensor(self.anchors).view(nA, self.anchor_step).index_select(1, torch.LongTensor([0])).cuda() - anchor_h = torch.Tensor(self.anchors).view(nA, self.anchor_step).index_select(1, torch.LongTensor([1])).cuda() - anchor_w = anchor_w.repeat(nB, 1).repeat(1, 1, nH * nW).view(nB * nA * nH * nW) - anchor_h = anchor_h.repeat(nB, 1).repeat(1, 1, nH * nW).view(nB * nA * nH * nW) - pred_boxes[0] = x.data + grid_x - pred_boxes[1] = y.data + grid_y - pred_boxes[2] = torch.exp(w.data) * anchor_w - pred_boxes[3] = torch.exp(h.data) * anchor_h - pred_boxes = convert2cpu(pred_boxes.transpose(0, 1).contiguous().view(-1, 4)) - t2 = time.time() - - nGT, nCorrect, coord_mask, conf_mask, cls_mask, tx, ty, tw, th, tconf, tcls = build_targets(pred_boxes, - target.data, - self.anchors, nA, - nC, \ - nH, nW, - self.noobject_scale, - self.object_scale, - self.thresh, - self.seen) - cls_mask = (cls_mask == 1) - nProposals = int((conf > 0.25).sum().data[0]) - - tx = Variable(tx.cuda()) - ty = Variable(ty.cuda()) - tw = Variable(tw.cuda()) - th = Variable(th.cuda()) - tconf = Variable(tconf.cuda()) - tcls = Variable(tcls.view(-1)[cls_mask].long().cuda()) - - coord_mask = Variable(coord_mask.cuda()) - conf_mask = Variable(conf_mask.cuda().sqrt()) - cls_mask = Variable(cls_mask.view(-1, 1).repeat(1, nC).cuda()) - cls = cls[cls_mask].view(-1, nC) - - t3 = time.time() - - loss_x = self.coord_scale * nn.MSELoss(reduction='sum')(x * coord_mask, tx * coord_mask) / 2.0 - loss_y = self.coord_scale * nn.MSELoss(reduction='sum')(y * coord_mask, ty * coord_mask) / 2.0 - loss_w = self.coord_scale * nn.MSELoss(reduction='sum')(w * coord_mask, tw * coord_mask) / 2.0 - loss_h = self.coord_scale * nn.MSELoss(reduction='sum')(h * coord_mask, th * coord_mask) / 2.0 - loss_conf = nn.MSELoss(reduction='sum')(conf * conf_mask, tconf * conf_mask) / 2.0 - loss_cls = self.class_scale * nn.CrossEntropyLoss(reduction='sum')(cls, tcls) - loss = loss_x + loss_y + loss_w + loss_h + loss_conf + loss_cls - t4 = time.time() - if False: - print('-----------------------------------') - print(' activation : %f' % (t1 - t0)) - print(' create pred_boxes : %f' % (t2 - t1)) - print(' build targets : %f' % (t3 - t2)) - print(' create loss : %f' % (t4 - t3)) - print(' total : %f' % (t4 - t0)) - print('%d: nGT %d, recall %d, proposals %d, loss: x %f, y %f, w %f, h %f, conf %f, cls %f, total %f' % ( - self.seen, nGT, nCorrect, nProposals, loss_x.data[0], loss_y.data[0], loss_w.data[0], loss_h.data[0], - loss_conf.data[0], loss_cls.data[0], loss.data[0])) - return loss diff --git a/legacy/lasr_object_detection_yolo/src/lasr_object_detection_yolo/darknet_pytorch/torch_utils.py b/legacy/lasr_object_detection_yolo/src/lasr_object_detection_yolo/darknet_pytorch/torch_utils.py deleted file mode 100644 index 8a9b499ad..000000000 
--- a/legacy/lasr_object_detection_yolo/src/lasr_object_detection_yolo/darknet_pytorch/torch_utils.py +++ /dev/null @@ -1,105 +0,0 @@ -import sys -import os -import time -import math -import torch -import numpy as np -from torch.autograd import Variable - -import itertools -import struct # get_image_size -import imghdr # get_image_size - -from darknet_pytorch import utils - - -def bbox_ious(boxes1, boxes2, x1y1x2y2=True): - if x1y1x2y2: - mx = torch.min(boxes1[0], boxes2[0]) - Mx = torch.max(boxes1[2], boxes2[2]) - my = torch.min(boxes1[1], boxes2[1]) - My = torch.max(boxes1[3], boxes2[3]) - w1 = boxes1[2] - boxes1[0] - h1 = boxes1[3] - boxes1[1] - w2 = boxes2[2] - boxes2[0] - h2 = boxes2[3] - boxes2[1] - else: - mx = torch.min(boxes1[0] - boxes1[2] / 2.0, boxes2[0] - boxes2[2] / 2.0) - Mx = torch.max(boxes1[0] + boxes1[2] / 2.0, boxes2[0] + boxes2[2] / 2.0) - my = torch.min(boxes1[1] - boxes1[3] / 2.0, boxes2[1] - boxes2[3] / 2.0) - My = torch.max(boxes1[1] + boxes1[3] / 2.0, boxes2[1] + boxes2[3] / 2.0) - w1 = boxes1[2] - h1 = boxes1[3] - w2 = boxes2[2] - h2 = boxes2[3] - uw = Mx - mx - uh = My - my - cw = w1 + w2 - uw - ch = h1 + h2 - uh - mask = ((cw <= 0) + (ch <= 0) > 0) - area1 = w1 * h1 - area2 = w2 * h2 - carea = cw * ch - carea[mask] = 0 - uarea = area1 + area2 - carea - return carea / uarea - - -def get_region_boxes(boxes_and_confs): - - # print('Getting boxes from boxes and confs ...') - - boxes_list = [] - confs_list = [] - - for item in boxes_and_confs: - boxes_list.append(item[0]) - confs_list.append(item[1]) - - # boxes: [batch, num1 + num2 + num3, 1, 4] - # confs: [batch, num1 + num2 + num3, num_classes] - boxes = torch.cat(boxes_list, dim=1) - confs = torch.cat(confs_list, dim=1) - - return [boxes, confs] - - -def convert2cpu(gpu_matrix): - return torch.FloatTensor(gpu_matrix.size()).copy_(gpu_matrix) - - -def convert2cpu_long(gpu_matrix): - return torch.LongTensor(gpu_matrix.size()).copy_(gpu_matrix) - - - -def do_detect(model, img, conf_thresh, nms_thresh, use_cuda=1): - model.eval() - with torch.no_grad(): - t0 = time.time() - - if type(img) == np.ndarray and len(img.shape) == 3: # cv2 image - img = torch.from_numpy(img.transpose(2, 0, 1)).float().div(255.0).unsqueeze(0) - elif type(img) == np.ndarray and len(img.shape) == 4: - img = torch.from_numpy(img.transpose(0, 3, 1, 2)).float().div(255.0) - else: - print("unknow image type") - exit(-1) - - if use_cuda: - img = img.cuda() - img = torch.autograd.Variable(img) - - t1 = time.time() - - output = model(img) - - t2 = time.time() - - print('-----------------------------------') - print(' Preprocess : %f' % (t1 - t0)) - print(' Model Inference : %f' % (t2 - t1)) - print('-----------------------------------') - - return utils.post_processing(img, conf_thresh, nms_thresh, output) - diff --git a/legacy/lasr_object_detection_yolo/src/lasr_object_detection_yolo/darknet_pytorch/utils.py b/legacy/lasr_object_detection_yolo/src/lasr_object_detection_yolo/darknet_pytorch/utils.py deleted file mode 100644 index a42e62642..000000000 --- a/legacy/lasr_object_detection_yolo/src/lasr_object_detection_yolo/darknet_pytorch/utils.py +++ /dev/null @@ -1,239 +0,0 @@ -import sys -import os -import time -import math -import numpy as np - -import itertools -import struct # get_image_size -import imghdr # get_image_size - - -def sigmoid(x): - return 1.0 / (np.exp(-x) + 1.) 
- - -def softmax(x): - x = np.exp(x - np.expand_dims(np.max(x, axis=1), axis=1)) - x = x / np.expand_dims(x.sum(axis=1), axis=1) - return x - - -def bbox_iou(box1, box2, x1y1x2y2=True): - - # print('iou box1:', box1) - # print('iou box2:', box2) - - if x1y1x2y2: - mx = min(box1[0], box2[0]) - Mx = max(box1[2], box2[2]) - my = min(box1[1], box2[1]) - My = max(box1[3], box2[3]) - w1 = box1[2] - box1[0] - h1 = box1[3] - box1[1] - w2 = box2[2] - box2[0] - h2 = box2[3] - box2[1] - else: - w1 = box1[2] - h1 = box1[3] - w2 = box2[2] - h2 = box2[3] - - mx = min(box1[0], box2[0]) - Mx = max(box1[0] + w1, box2[0] + w2) - my = min(box1[1], box2[1]) - My = max(box1[1] + h1, box2[1] + h2) - uw = Mx - mx - uh = My - my - cw = w1 + w2 - uw - ch = h1 + h2 - uh - carea = 0 - if cw <= 0 or ch <= 0: - return 0.0 - - area1 = w1 * h1 - area2 = w2 * h2 - carea = cw * ch - uarea = area1 + area2 - carea - return carea / uarea - - -def nms_cpu(boxes, confs, nms_thresh=0.5, min_mode=False): - # print(boxes.shape) - x1 = boxes[:, 0] - y1 = boxes[:, 1] - x2 = boxes[:, 2] - y2 = boxes[:, 3] - - areas = (x2 - x1) * (y2 - y1) - order = confs.argsort()[::-1] - - keep = [] - while order.size > 0: - idx_self = order[0] - idx_other = order[1:] - - keep.append(idx_self) - - xx1 = np.maximum(x1[idx_self], x1[idx_other]) - yy1 = np.maximum(y1[idx_self], y1[idx_other]) - xx2 = np.minimum(x2[idx_self], x2[idx_other]) - yy2 = np.minimum(y2[idx_self], y2[idx_other]) - - w = np.maximum(0.0, xx2 - xx1) - h = np.maximum(0.0, yy2 - yy1) - inter = w * h - - if min_mode: - over = inter / np.minimum(areas[order[0]], areas[order[1:]]) - else: - over = inter / (areas[order[0]] + areas[order[1:]] - inter) - - inds = np.where(over <= nms_thresh)[0] - order = order[inds + 1] - - return np.array(keep) - - - -def plot_boxes_cv2(img, boxes, savename=None, class_names=None, color=None): - import cv2 - img = np.copy(img) - colors = np.array([[1, 0, 1], [0, 0, 1], [0, 1, 1], [0, 1, 0], [1, 1, 0], [1, 0, 0]], dtype=np.float32) - - def get_color(c, x, max_val): - ratio = float(x) / max_val * 5 - i = int(math.floor(ratio)) - j = int(math.ceil(ratio)) - ratio = ratio - i - r = (1 - ratio) * colors[i][c] + ratio * colors[j][c] - return int(r * 255) - - width = img.shape[1] - height = img.shape[0] - for i in range(len(boxes)): - box = boxes[i] - x1 = int(box[0] * width) - y1 = int(box[1] * height) - x2 = int(box[2] * width) - y2 = int(box[3] * height) - bbox_thick = int(0.6 * (height + width) / 600) - if color: - rgb = color - else: - rgb = (255, 0, 0) - if len(box) >= 7 and class_names: - cls_conf = box[5] - cls_id = box[6] - print('%s: %f' % (class_names[cls_id], cls_conf)) - classes = len(class_names) - offset = cls_id * 123457 % classes - red = get_color(2, offset, classes) - green = get_color(1, offset, classes) - blue = get_color(0, offset, classes) - if color is None: - rgb = (red, green, blue) - msg = str(class_names[cls_id])+" "+str(round(cls_conf,3)) - t_size = cv2.getTextSize(msg, 0, 0.7, thickness=bbox_thick // 2)[0] - c1, c2 = (x1,y1), (x2, y2) - c3 = (c1[0] + t_size[0], c1[1] - t_size[1] - 3) - cv2.rectangle(img, (x1,y1), (np.float32(c3[0]), np.float32(c3[1])), rgb, -1) - img = cv2.putText(img, msg, (c1[0], np.float32(c1[1] - 2)), cv2.FONT_HERSHEY_SIMPLEX,0.7, (0,0,0), bbox_thick//2,lineType=cv2.LINE_AA) - - img = cv2.rectangle(img, (x1, y1), (x2, y2), rgb, bbox_thick) - if savename: - print("save plot results to %s" % savename) - cv2.imwrite(savename, img) - return img - - -def read_truths(lab_path): - if not os.path.exists(lab_path): - 
return np.array([]) - if os.path.getsize(lab_path): - truths = np.loadtxt(lab_path) - truths = truths.reshape(truths.size / 5, 5) # to avoid single truth problem - return truths - else: - return np.array([]) - - -def load_class_names(namesfile): - class_names = [] - with open(namesfile, 'r') as fp: - lines = fp.readlines() - for line in lines: - line = line.rstrip() - class_names.append(line) - return class_names - - - -def post_processing(img, conf_thresh, nms_thresh, output): - - # anchors = [12, 16, 19, 36, 40, 28, 36, 75, 76, 55, 72, 146, 142, 110, 192, 243, 459, 401] - # num_anchors = 9 - # anchor_masks = [[0, 1, 2], [3, 4, 5], [6, 7, 8]] - # strides = [8, 16, 32] - # anchor_step = len(anchors) // num_anchors - - # [batch, num, 1, 4] - box_array = output[0] - # [batch, num, num_classes] - confs = output[1] - - t1 = time.time() - - if type(box_array).__name__ != 'ndarray': - box_array = box_array.cpu().detach().numpy() - confs = confs.cpu().detach().numpy() - - num_classes = confs.shape[2] - - # [batch, num, 4] - box_array = box_array[:, :, 0] - - # [batch, num, num_classes] --> [batch, num] - max_conf = np.max(confs, axis=2) - max_id = np.argmax(confs, axis=2) - - t2 = time.time() - - bboxes_batch = [] - for i in range(box_array.shape[0]): - - argwhere = max_conf[i] > conf_thresh - l_box_array = box_array[i, argwhere, :] - l_max_conf = max_conf[i, argwhere] - l_max_id = max_id[i, argwhere] - - bboxes = [] - # nms for each class - for j in range(num_classes): - - cls_argwhere = l_max_id == j - ll_box_array = l_box_array[cls_argwhere, :] - ll_max_conf = l_max_conf[cls_argwhere] - ll_max_id = l_max_id[cls_argwhere] - - keep = nms_cpu(ll_box_array, ll_max_conf, nms_thresh) - - if (keep.size > 0): - ll_box_array = ll_box_array[keep, :] - ll_max_conf = ll_max_conf[keep] - ll_max_id = ll_max_id[keep] - - for k in range(ll_box_array.shape[0]): - bboxes.append([ll_box_array[k, 0], ll_box_array[k, 1], ll_box_array[k, 2], ll_box_array[k, 3], ll_max_conf[k], ll_max_conf[k], ll_max_id[k]]) - - bboxes_batch.append(bboxes) - - t3 = time.time() - - print('-----------------------------------') - print(' max and argmax : %f' % (t2 - t1)) - print(' nms : %f' % (t3 - t2)) - print('Post processing total : %f' % (t3 - t1)) - print('-----------------------------------') - - return bboxes_batch diff --git a/legacy/lasr_object_detection_yolo/src/lasr_object_detection_yolo/darknet_pytorch/yolo_layer.py b/legacy/lasr_object_detection_yolo/src/lasr_object_detection_yolo/darknet_pytorch/yolo_layer.py deleted file mode 100644 index 1ea21566b..000000000 --- a/legacy/lasr_object_detection_yolo/src/lasr_object_detection_yolo/darknet_pytorch/yolo_layer.py +++ /dev/null @@ -1,322 +0,0 @@ -import torch.nn as nn -import torch.nn.functional as F -from darknet_pytorch.torch_utils import * - -def yolo_forward(output, conf_thresh, num_classes, anchors, num_anchors, scale_x_y, only_objectness=1, - validation=False): - # Output would be invalid if it does not satisfy this assert - # assert (output.size(1) == (5 + num_classes) * num_anchors) - - # print(output.size()) - - # Slice the second dimension (channel) of output into: - # [ 2, 2, 1, num_classes, 2, 2, 1, num_classes, 2, 2, 1, num_classes ] - # And then into - # bxy = [ 6 ] bwh = [ 6 ] det_conf = [ 3 ] cls_conf = [ num_classes * 3 ] - batch = output.size(0) - H = output.size(2) - W = output.size(3) - - bxy_list = [] - bwh_list = [] - det_confs_list = [] - cls_confs_list = [] - - for i in range(num_anchors): - begin = i * (5 + num_classes) - end = (i + 1) * (5 + 
num_classes) - - bxy_list.append(output[:, begin : begin + 2]) - bwh_list.append(output[:, begin + 2 : begin + 4]) - det_confs_list.append(output[:, begin + 4 : begin + 5]) - cls_confs_list.append(output[:, begin + 5 : end]) - - # Shape: [batch, num_anchors * 2, H, W] - bxy = torch.cat(bxy_list, dim=1) - # Shape: [batch, num_anchors * 2, H, W] - bwh = torch.cat(bwh_list, dim=1) - - # Shape: [batch, num_anchors, H, W] - det_confs = torch.cat(det_confs_list, dim=1) - # Shape: [batch, num_anchors * H * W] - det_confs = det_confs.view(batch, num_anchors * H * W) - - # Shape: [batch, num_anchors * num_classes, H, W] - cls_confs = torch.cat(cls_confs_list, dim=1) - # Shape: [batch, num_anchors, num_classes, H * W] - cls_confs = cls_confs.view(batch, num_anchors, num_classes, H * W) - # Shape: [batch, num_anchors, num_classes, H * W] --> [batch, num_anchors * H * W, num_classes] - cls_confs = cls_confs.permute(0, 1, 3, 2).reshape(batch, num_anchors * H * W, num_classes) - - # Apply sigmoid(), exp() and softmax() to slices - # - bxy = torch.sigmoid(bxy) * scale_x_y - 0.5 * (scale_x_y - 1) - bwh = torch.exp(bwh) - det_confs = torch.sigmoid(det_confs) - cls_confs = torch.sigmoid(cls_confs) - - # Prepare C-x, C-y, P-w, P-h (None of them are torch related) - grid_x = np.expand_dims(np.expand_dims(np.expand_dims(np.linspace(0, W - 1, W), axis=0).repeat(H, 0), axis=0), axis=0) - grid_y = np.expand_dims(np.expand_dims(np.expand_dims(np.linspace(0, H - 1, H), axis=1).repeat(W, 1), axis=0), axis=0) - # grid_x = torch.linspace(0, W - 1, W).reshape(1, 1, 1, W).repeat(1, 1, H, 1) - # grid_y = torch.linspace(0, H - 1, H).reshape(1, 1, H, 1).repeat(1, 1, 1, W) - - anchor_w = [] - anchor_h = [] - for i in range(num_anchors): - anchor_w.append(anchors[i * 2]) - anchor_h.append(anchors[i * 2 + 1]) - - device = None - cuda_check = output.is_cuda - if cuda_check: - device = output.get_device() - - bx_list = [] - by_list = [] - bw_list = [] - bh_list = [] - - # Apply C-x, C-y, P-w, P-h - for i in range(num_anchors): - ii = i * 2 - # Shape: [batch, 1, H, W] - bx = bxy[:, ii : ii + 1] + torch.tensor(grid_x, device=device, dtype=torch.float32) # grid_x.to(device=device, dtype=torch.float32) - # Shape: [batch, 1, H, W] - by = bxy[:, ii + 1 : ii + 2] + torch.tensor(grid_y, device=device, dtype=torch.float32) # grid_y.to(device=device, dtype=torch.float32) - # Shape: [batch, 1, H, W] - bw = bwh[:, ii : ii + 1] * anchor_w[i] - # Shape: [batch, 1, H, W] - bh = bwh[:, ii + 1 : ii + 2] * anchor_h[i] - - bx_list.append(bx) - by_list.append(by) - bw_list.append(bw) - bh_list.append(bh) - - - ######################################## - # Figure out bboxes from slices # - ######################################## - - # Shape: [batch, num_anchors, H, W] - bx = torch.cat(bx_list, dim=1) - # Shape: [batch, num_anchors, H, W] - by = torch.cat(by_list, dim=1) - # Shape: [batch, num_anchors, H, W] - bw = torch.cat(bw_list, dim=1) - # Shape: [batch, num_anchors, H, W] - bh = torch.cat(bh_list, dim=1) - - # Shape: [batch, 2 * num_anchors, H, W] - bx_bw = torch.cat((bx, bw), dim=1) - # Shape: [batch, 2 * num_anchors, H, W] - by_bh = torch.cat((by, bh), dim=1) - - # normalize coordinates to [0, 1] - bx_bw /= W - by_bh /= H - - # Shape: [batch, num_anchors * H * W, 1] - bx = bx_bw[:, :num_anchors].view(batch, num_anchors * H * W, 1) - by = by_bh[:, :num_anchors].view(batch, num_anchors * H * W, 1) - bw = bx_bw[:, num_anchors:].view(batch, num_anchors * H * W, 1) - bh = by_bh[:, num_anchors:].view(batch, num_anchors * H * W, 1) - - bx1 = bx - 
bw * 0.5 - by1 = by - bh * 0.5 - bx2 = bx1 + bw - by2 = by1 + bh - - # Shape: [batch, num_anchors * h * w, 4] -> [batch, num_anchors * h * w, 1, 4] - boxes = torch.cat((bx1, by1, bx2, by2), dim=2).view(batch, num_anchors * H * W, 1, 4) - # boxes = boxes.repeat(1, 1, num_classes, 1) - - # boxes: [batch, num_anchors * H * W, 1, 4] - # cls_confs: [batch, num_anchors * H * W, num_classes] - # det_confs: [batch, num_anchors * H * W] - - det_confs = det_confs.view(batch, num_anchors * H * W, 1) - confs = cls_confs * det_confs - - # boxes: [batch, num_anchors * H * W, 1, 4] - # confs: [batch, num_anchors * H * W, num_classes] - - return boxes, confs - - -def yolo_forward_dynamic(output, conf_thresh, num_classes, anchors, num_anchors, scale_x_y, only_objectness=1, - validation=False): - # Output would be invalid if it does not satisfy this assert - # assert (output.size(1) == (5 + num_classes) * num_anchors) - - # print(output.size()) - - # Slice the second dimension (channel) of output into: - # [ 2, 2, 1, num_classes, 2, 2, 1, num_classes, 2, 2, 1, num_classes ] - # And then into - # bxy = [ 6 ] bwh = [ 6 ] det_conf = [ 3 ] cls_conf = [ num_classes * 3 ] - # batch = output.size(0) - # H = output.size(2) - # W = output.size(3) - - bxy_list = [] - bwh_list = [] - det_confs_list = [] - cls_confs_list = [] - - for i in range(num_anchors): - begin = i * (5 + num_classes) - end = (i + 1) * (5 + num_classes) - - bxy_list.append(output[:, begin : begin + 2]) - bwh_list.append(output[:, begin + 2 : begin + 4]) - det_confs_list.append(output[:, begin + 4 : begin + 5]) - cls_confs_list.append(output[:, begin + 5 : end]) - - # Shape: [batch, num_anchors * 2, H, W] - bxy = torch.cat(bxy_list, dim=1) - # Shape: [batch, num_anchors * 2, H, W] - bwh = torch.cat(bwh_list, dim=1) - - # Shape: [batch, num_anchors, H, W] - det_confs = torch.cat(det_confs_list, dim=1) - # Shape: [batch, num_anchors * H * W] - det_confs = det_confs.view(output.size(0), num_anchors * output.size(2) * output.size(3)) - - # Shape: [batch, num_anchors * num_classes, H, W] - cls_confs = torch.cat(cls_confs_list, dim=1) - # Shape: [batch, num_anchors, num_classes, H * W] - cls_confs = cls_confs.view(output.size(0), num_anchors, num_classes, output.size(2) * output.size(3)) - # Shape: [batch, num_anchors, num_classes, H * W] --> [batch, num_anchors * H * W, num_classes] - cls_confs = cls_confs.permute(0, 1, 3, 2).reshape(output.size(0), num_anchors * output.size(2) * output.size(3), num_classes) - - # Apply sigmoid(), exp() and softmax() to slices - # - bxy = torch.sigmoid(bxy) * scale_x_y - 0.5 * (scale_x_y - 1) - bwh = torch.exp(bwh) - det_confs = torch.sigmoid(det_confs) - cls_confs = torch.sigmoid(cls_confs) - - # Prepare C-x, C-y, P-w, P-h (None of them are torch related) - grid_x = np.expand_dims(np.expand_dims(np.expand_dims(np.linspace(0, output.size(3) - 1, output.size(3)), axis=0).repeat(output.size(2), 0), axis=0), axis=0) - grid_y = np.expand_dims(np.expand_dims(np.expand_dims(np.linspace(0, output.size(2) - 1, output.size(2)), axis=1).repeat(output.size(3), 1), axis=0), axis=0) - # grid_x = torch.linspace(0, W - 1, W).reshape(1, 1, 1, W).repeat(1, 1, H, 1) - # grid_y = torch.linspace(0, H - 1, H).reshape(1, 1, H, 1).repeat(1, 1, 1, W) - - anchor_w = [] - anchor_h = [] - for i in range(num_anchors): - anchor_w.append(anchors[i * 2]) - anchor_h.append(anchors[i * 2 + 1]) - - device = None - cuda_check = output.is_cuda - if cuda_check: - device = output.get_device() - - bx_list = [] - by_list = [] - bw_list = [] - bh_list = [] - - 
# Apply C-x, C-y, P-w, P-h - for i in range(num_anchors): - ii = i * 2 - # Shape: [batch, 1, H, W] - bx = bxy[:, ii : ii + 1] + torch.tensor(grid_x, device=device, dtype=torch.float32) # grid_x.to(device=device, dtype=torch.float32) - # Shape: [batch, 1, H, W] - by = bxy[:, ii + 1 : ii + 2] + torch.tensor(grid_y, device=device, dtype=torch.float32) # grid_y.to(device=device, dtype=torch.float32) - # Shape: [batch, 1, H, W] - bw = bwh[:, ii : ii + 1] * anchor_w[i] - # Shape: [batch, 1, H, W] - bh = bwh[:, ii + 1 : ii + 2] * anchor_h[i] - - bx_list.append(bx) - by_list.append(by) - bw_list.append(bw) - bh_list.append(bh) - - - ######################################## - # Figure out bboxes from slices # - ######################################## - - # Shape: [batch, num_anchors, H, W] - bx = torch.cat(bx_list, dim=1) - # Shape: [batch, num_anchors, H, W] - by = torch.cat(by_list, dim=1) - # Shape: [batch, num_anchors, H, W] - bw = torch.cat(bw_list, dim=1) - # Shape: [batch, num_anchors, H, W] - bh = torch.cat(bh_list, dim=1) - - # Shape: [batch, 2 * num_anchors, H, W] - bx_bw = torch.cat((bx, bw), dim=1) - # Shape: [batch, 2 * num_anchors, H, W] - by_bh = torch.cat((by, bh), dim=1) - - # normalize coordinates to [0, 1] - bx_bw /= output.size(3) - by_bh /= output.size(2) - - # Shape: [batch, num_anchors * H * W, 1] - bx = bx_bw[:, :num_anchors].view(output.size(0), num_anchors * output.size(2) * output.size(3), 1) - by = by_bh[:, :num_anchors].view(output.size(0), num_anchors * output.size(2) * output.size(3), 1) - bw = bx_bw[:, num_anchors:].view(output.size(0), num_anchors * output.size(2) * output.size(3), 1) - bh = by_bh[:, num_anchors:].view(output.size(0), num_anchors * output.size(2) * output.size(3), 1) - - bx1 = bx - bw * 0.5 - by1 = by - bh * 0.5 - bx2 = bx1 + bw - by2 = by1 + bh - - # Shape: [batch, num_anchors * h * w, 4] -> [batch, num_anchors * h * w, 1, 4] - boxes = torch.cat((bx1, by1, bx2, by2), dim=2).view(output.size(0), num_anchors * output.size(2) * output.size(3), 1, 4) - # boxes = boxes.repeat(1, 1, num_classes, 1) - - # boxes: [batch, num_anchors * H * W, 1, 4] - # cls_confs: [batch, num_anchors * H * W, num_classes] - # det_confs: [batch, num_anchors * H * W] - - det_confs = det_confs.view(output.size(0), num_anchors * output.size(2) * output.size(3), 1) - confs = cls_confs * det_confs - - # boxes: [batch, num_anchors * H * W, 1, 4] - # confs: [batch, num_anchors * H * W, num_classes] - - return boxes, confs - -class YoloLayer(nn.Module): - ''' Yolo layer - model_out: while inference,is post-processing inside or outside the model - true:outside - ''' - def __init__(self, anchor_mask=[], num_classes=0, anchors=[], num_anchors=1, stride=32, model_out=False): - super(YoloLayer, self).__init__() - self.anchor_mask = anchor_mask - self.num_classes = num_classes - self.anchors = anchors - self.num_anchors = num_anchors - self.anchor_step = len(anchors) // num_anchors - self.coord_scale = 1 - self.noobject_scale = 1 - self.object_scale = 5 - self.class_scale = 1 - self.thresh = 0.6 - self.stride = stride - self.seen = 0 - self.scale_x_y = 1 - - self.model_out = model_out - - def forward(self, output, target=None): - if self.training: - return output - masked_anchors = [] - for m in self.anchor_mask: - masked_anchors += self.anchors[m * self.anchor_step:(m + 1) * self.anchor_step] - masked_anchors = [anchor / self.stride for anchor in masked_anchors] - - return yolo_forward_dynamic(output, self.thresh, self.num_classes, masked_anchors, 
len(self.anchor_mask),scale_x_y=self.scale_x_y) - diff --git a/legacy/lasr_object_detection_yolo/src/lasr_object_detection_yolo/detect_objects.py b/legacy/lasr_object_detection_yolo/src/lasr_object_detection_yolo/detect_objects.py deleted file mode 100644 index c799216d1..000000000 --- a/legacy/lasr_object_detection_yolo/src/lasr_object_detection_yolo/detect_objects.py +++ /dev/null @@ -1,46 +0,0 @@ -#!/usr/bin/env python3 - -import rospy -from lasr_vision_msgs.srv import YoloDetection, YoloDetectionRequest -from sensor_msgs.msg import Image - - -def detect_objects(object_names: [str], model="coco"): - """ - Detects all persons in an image - """ - rospy.wait_for_service('yolo_object_detection_server/detect_objects') - - if not isinstance(object_names, list): - raise ValueError("please input a list of strings") - - try: - detect_objects = rospy.ServiceProxy('yolo_object_detection_server/detect_objects', YoloDetection) - image_msg = rospy.wait_for_message('/xtion/rgb/image_raw', Image) # wait for rgb image - - req = YoloDetectionRequest() - req.image_raw = image_msg - req.dataset = model - req.confidence = 0.7 - req.nms = 0.3 - - yolo_resp = detect_objects(req) - except rospy.ServiceException as e: - print("Service call failed: %s" % e) - return [] - - objects = [] - for obj in yolo_resp.detected_objects: - if obj.name in object_names: - objects.append(obj) - return objects - - -if __name__ == '__main__': - rospy.init_node("objects_detection", anonymous=True) - objects = detect_objects(["person", "mug", "phone"]) - for o in objects: - print("object name: ", o.name) - print("object confidence: ", o.confidence) - print("object position: ", o.xywh) - diff --git a/legacy/lasr_object_detection_yolo/src/lasr_object_detection_yolo/detect_objects_v8.py b/legacy/lasr_object_detection_yolo/src/lasr_object_detection_yolo/detect_objects_v8.py deleted file mode 100755 index f71cf2a91..000000000 --- a/legacy/lasr_object_detection_yolo/src/lasr_object_detection_yolo/detect_objects_v8.py +++ /dev/null @@ -1,314 +0,0 @@ -#!/usr/bin/env python3 - -import rospy -from lasr_vision_msgs.srv import YoloDetection, YoloDetectionRequest -from sensor_msgs.msg import Image -from common_math import pcl_msg_to_cv2, seg_to_centroid -from std_msgs.msg import String -from geometry_msgs.msg import PointStamped, Point -from tf_module.srv import TfTransform, TfTransformRequest -import numpy as np -from lasr_shapely import LasrShapely -from cv_bridge3 import CvBridge -from sensor_msgs.msg import PointCloud2 -import cv2 -# from tiago_controllers.helpers.nav_map_helpers import is_close_to_object, rank - - - -def detect_objects(object_names: [str], confidence=0.25, nms=0.4, model="yolov8n.pt"): - """ - Detects all persons in an image using yolov8 - """ - rospy.wait_for_service("/yolov8/detect", rospy.Duration(15.0)) - - if not isinstance(object_names, list): - raise ValueError("please input a list of strings") - - try: - detect_service = rospy.ServiceProxy('/yolov8/detect', YoloDetection) - - if rospy.get_published_topics(namespace='/camera/image_raw'): - image_msg = rospy.wait_for_message('/camera/image_raw', Image) # wait for depth image - elif rospy.get_published_topics(namespace='/xtion/rgb/image_raw'): - image_msg = rospy.wait_for_message('/xtion/rgb/image_raw', Image) - elif rospy.get_published_topics(namespace='/usb_cam/image_raw'): - image_msg = rospy.wait_for_message('/usb_cam/image_raw', Image) # wait for rgb image - - - req = YoloDetectionRequest() - req.image_raw = image_msg - req.dataset = model - req.confidence = 
confidence - req.nms = nms - resp = detect_service(req) - print(resp) - - except rospy.ServiceException as e: - print("Service call failed: %s" % e) - return [] - - objects = [] - for obj in resp.detected_objects: - if obj.name in object_names: - objects.append(obj) - return objects - -def estimate_pose(tf, pcl_msg, detection): - centroid_xyz = seg_to_centroid(pcl_msg, np.array(detection.xyseg)) - centroid = PointStamped() - centroid.point = Point(*centroid_xyz) - centroid.header = pcl_msg.header - tf_req = TfTransformRequest() - tf_req.target_frame = String("map") - tf_req.point = centroid - response = tf(tf_req) - # print("response") - # print(response) - return np.array([response.target_point.point.x, response.target_point.point.y, response.target_point.point.z]) -# def perform_detection(yolo,tf, bridge, shapely, pcl_msg, polygon, filter, model="yolov8n-seg.pt"): -# cv_im = pcl_msg_to_cv2(pcl_msg) -# img_msg = bridge.cv2_to_imgmsg(cv_im) -# detections = yolo(img_msg, model, 0.5, 0.3) -# rospy.loginfo(detections) -# detections = [(det, estimate_pose(tf, pcl_msg, det)) for det in detections.detected_objects if -# det.name in filter] -# rospy.loginfo(f"All: {[(det.name, pose) for det, pose in detections]}") -# rospy.loginfo(f"Boundary: {polygon}") -# satisfied_points = shapely.are_points_in_polygon_2d(polygon, [[pose[0], pose[1]] for (_, pose) in -# detections]).inside -# detections = [detections[i] for i in range(0, len(detections)) if satisfied_points[i]] -# rospy.loginfo(f"Filtered: {[(det.name, pose) for det, pose in detections]}") -# print(len(detections)) -# return detections, img_msg - - -# for the comp -def perform_detection(default, pcl_msg, polygon, filter, model="yolov8n-seg.pt"): - cv_im = pcl_msg_to_cv2(pcl_msg) - img_msg = default.bridge.cv2_to_imgmsg(cv_im) - detections = default.yolo(img_msg, model, 0.5, 0.3) - # rospy.loginfo(detections) - detections = [(det, estimate_pose(default.tf, pcl_msg, det)) for det in detections.detected_objects if - det.name in filter] - - # rospy.loginfo(f"All: {[(det.name, pose) for det, pose in detections]}") - # rospy.loginfo(f"Boundary: {polygon}") - if polygon is None: - return detections, img_msg - else: - satisfied_points = default.shapely.are_points_in_polygon_2d(polygon, [[pose[0], pose[1]] for (_, pose) in - detections]).inside - detections = [detections[i] for i in range(0, len(detections)) if satisfied_points[i]] - # rospy.loginfo(f"Filtoprintered: {[(det.name, pose) for det, pose in detections]}") - print("len detections --->") - print(len(detections)) - return detections, img_msg - - -def debug(image, resp): - rospy.loginfo("Received image message") - try: - # pick the first detection as an example - if len(resp) > 0: - detection = resp[0][0] - - if len(detection.xyseg) > 0: - # unflatten the array - contours = np.array(detection.xyseg).reshape(-1, 2) - - # draw using opencv - img = np.zeros((image.height, image.width), dtype=np.uint8) - cv2.fillPoly(img, pts = [contours], color = (255,255,255)) - - # send to topic - # img_msg = bridge.cv2_to_imgmsg(img, encoding="passthrough") - # debug_publisher1.publish(img_msg) - debug_publisher1.publish(image) - else: - print('WARN: No segmentation was performed on the image!') - else: - print('WARN: no detections') - - # draw all of them - if len(resp) > 0: - img = np.zeros((image.height, image.width), dtype=np.uint8) - for detection in resp: - detection = detection[0] - if len(detection.xyseg) > 0: - contours = np.array(detection.xyseg).reshape(-1, 2) - r,g,b = np.random.randint(0, 255, 
size=3) - cv2.fillPoly(img, pts = [contours], color = (int(r), int(g), int(b))) - img_msg = bridge.cv2_to_imgmsg(img, encoding="passthrough") - debug_publisher2.publish(img_msg) - else: - print('WARN: no detections') - - except rospy.ServiceException as e: - rospy.logerr("Service call failed: %s" % e) - -def is_anyone_in_front_of_me(yolo,tf, bridge, shapely, pcl_msg, polygon, filter, model="yolov8n-seg.pt"): - pcl_msg = rospy.wait_for_message("/xtion/depth_registered/points", PointCloud2) - polygon = rospy.get_param('lift_position_points') - print(polygon) - detections, im = perform_detection(yolo, tf, bridge, shapely, pcl_msg, polygon, filter, model="yolov8n-seg.pt") - return len(detections) > 0 - -DIST_THRESH = 0.1 - -def euclidean_distance(point1, point2): - return np.sqrt((point1[0] - point2[0]) ** 2 + (point1[1] - point2[1]) ** 2 + (point1[2] - point2[2]) ** 2) -# def match_detections_between_frames(det1, det2): -# matches = {} -# -# for i, position1 in enumerate(det1): -# closest_distance = float('inf') -# closest_position = None -# -# for j, position2 in enumerate(det2): -# distance = euclidean_distance(position1, position2) -# if distance < closest_distance: -# closest_distance = distance -# closest_position = position2 -# -# if closest_position is not None: -# matches[i] = closest_position -# -# robot_position = [robot_x, robot_y, robot_z] -# -# for i, position2 in matches.values(): -# vector = np.array(position2) - np.array(det1[i]) -# -# vector_to_robot = np.array(robot_position) - np.array(det1[i]) -# dot_product = np.dot(vector, vector_to_robot) -# -# if dot_product > 0: -# print(f"Position {i + 1} in frame 1 faces the robot.") -# else: -# print(f"Position {i + 1} in frame 1 does not face the robot.") -# -# static_pos = [] -# moving_pos = [] -# -# for i, position2 in matches.items(): -# initial_position = det1[i] -# final_position = position2 -# -# initial_distance_to_robot = euclidean_distance(initial_position, robot_position) -# final_distance_to_robot = euclidean_distance(final_position, robot_position) -# -# if final_distance_to_robot < initial_distance_to_robot: -# # moved closer -# moving_pos.append(i) -# elif final_distance_to_robot > initial_distance_to_robot: -# #mode further -# pass -# else: -# # remained the same -# static_pos.append(i) -# -# # face the quat -# return static_pos, moving_pos - - -def phase1(yolo, tf, bridge, shapely, pcl_msg, polygon, filter, model): - # get two frames at the time - pcl_msg = rospy.wait_for_message("/xtion/depth_registered/points", PointCloud2) - polygon = rospy.get_param('test_lift_points') - detections1, im = perform_detection(yolo, tf, bridge, shapely, pcl_msg, polygon, filter, model) - rospy.sleep(5) - pcl_msg = rospy.wait_for_message("/xtion/depth_registered/points", PointCloud2) - detections2, im = perform_detection(yolo, tf, bridge, shapely, pcl_msg, polygon, filter, model) - while len(detections1) != len(detections2): - detections1, im = perform_detection(yolo, tf, bridge, shapely, pcl_msg, polygon, filter, model) - rospy.sleep(5) - pcl_msg = rospy.wait_for_message("/xtion/depth_registered/points", PointCloud2) - detections2, im = perform_detection(yolo, tf, bridge, shapely, pcl_msg, polygon, filter, model) - - # match the detections based on euclidean distance - matching = {} - not_matching = [] - for i, det1 in enumerate(detections1): - for j, det2 in enumerate(detections2): - if np.linalg.norm(det1[1] - det2[1]) < DIST_THRESH: - matching[i] = j - else: - not_matching.append(j) - - # print(not_matching) - - - - - # if the - 
- - - - -if __name__ == '__main__': - rospy.init_node("objects_detection_yolov8", anonymous=True) - - # object detection yolo v8 - # objects = detect_objects(["person", "mug", "phone"]) - # for o in objects: - # print("object name: ", o.name) - # print("object confidence: ", o.confidence) - # print("object position: ", o.xywh) - debug_publisher1 = rospy.Publisher('/yolov8/debug_mask', Image, queue_size=1) - debug_publisher2 = rospy.Publisher('/yolov8/debug_mask_all', Image, queue_size=1) - - # perform detection and segmentation - rospy.wait_for_service('/yolov8/detect') - yolo = rospy.ServiceProxy('/yolov8/detect', YoloDetection) - rospy.wait_for_service("/tf_transform") - tf = rospy.ServiceProxy("/tf_transform", TfTransform) - shapely = LasrShapely() - rospy.loginfo("Got shapely") - bridge = CvBridge() - rospy.loginfo("CV Bridge") - - # yolo seg only - # print("now calling perform detection") - # pcl_msg = rospy.wait_for_message("/xtion/depth_registered/points", PointCloud2) - # polygon = rospy.get_param('test_lift_points') - # detections, im = perform_detection(yolo, tf, bridge, shapely, pcl_msg, polygon, ["person"], "yolov8n-seg.pt") - # debug(im, detections) - # print("now printing detections") - # print(len(detections)) - # pos_people = [] - # for i, person in detections: - # print(person) - # pos_people.append([person[0], person[1]]) - # - # print(person[0], person[1]) - # - # print(pos_people) - # yolo seg only - - pcl_msg = rospy.wait_for_message("/xtion/depth_registered/points", PointCloud2) - polygon = rospy.get_param('test_lift_points') - detections, im = perform_detection(yolo, tf, bridge, shapely, pcl_msg, polygon, ["person"], "yolov8n-seg.pt") - - pos_people = [] - for i, person in detections: - # print(person) - person = person.tolist() - # print(type(person)) - pos_people.append([person[0], person[1]]) - - num_people = len(detections) - - rospy.set_param("/lift/num_people", num_people) - rospy.set_param("/lift/pos_persons", pos_people) - - pcl_msg = rospy.wait_for_message("/xtion/depth_registered/points", PointCloud2) - polygon = rospy.get_param('test_lift_points') - # print("is anyone in front of me >") - # print(is_anyone_in_front_of_me(yolo, tf, bridge, shapely, pcl_msg, polygon, ["person"], "yolov8n-seg.pt")) - # print("is anyone in front of me") - - # detections of i and then th esecond from the tuple - # print(detections[0][1]) - rospy.spin() - diff --git a/legacy/lasr_object_detection_yolo/src/lasr_object_detection_yolo/yolo_client.py b/legacy/lasr_object_detection_yolo/src/lasr_object_detection_yolo/yolo_client.py deleted file mode 100755 index 9a6161588..000000000 --- a/legacy/lasr_object_detection_yolo/src/lasr_object_detection_yolo/yolo_client.py +++ /dev/null @@ -1,28 +0,0 @@ -#!/usr/bin/env python3 - -import rospy - -from lasr_vision_msgs.srv import YoloDetection, YoloDetectionRequest -from sensor_msgs.msg import Image -from cv_bridge3 import CvBridge -import cv2 - -if __name__ == '__main__': - rospy.init_node("yolo_client", anonymous=True) - bridge = CvBridge() - while not rospy.is_shutdown(): - rospy.wait_for_service('yolo_object_detection_server/detect_objects') - im = rospy.wait_for_message('/usb_cam/image_raw', Image) - image = bridge.imgmsg_to_cv2(im, desired_encoding='passthrough') - cv2.imwrite('hello.jpg', image) - req = YoloDetectionRequest() - req.image_raw= im - req.dataset ='coco' - req.confidence = 0.5 - req.nms = 0.3 - print('hello') - server = rospy.ServiceProxy('yolo_object_detection_server/detect_objects', YoloDetection) - detections = 
server(req).detected_objects - print( - f"DETECTED:{detections}\n" - ) \ No newline at end of file diff --git a/legacy/lasr_object_detection_yolo/src/lasr_object_detection_yolo/yolo_object_detection_server.py b/legacy/lasr_object_detection_yolo/src/lasr_object_detection_yolo/yolo_object_detection_server.py deleted file mode 100755 index 648e6b35b..000000000 --- a/legacy/lasr_object_detection_yolo/src/lasr_object_detection_yolo/yolo_object_detection_server.py +++ /dev/null @@ -1,184 +0,0 @@ -#!/usr/bin/env python3 -import rospy -import rospkg - -import torch -import torchvision.transforms as transforms - -import cv2 -from PIL import Image as PIL_Image -import numpy as np - -import os -import time - -from darknet_pytorch.darknet import Darknet -from darknet_pytorch.utils import post_processing - -from lasr_vision_msgs.srv import YoloDetection, YoloDetectionResponse -from lasr_vision_msgs.msg import Detection - -import nvidia_smi - - -MODEL_ROOT = os.path.join(rospkg.RosPack().get_path('lasr_object_detection_yolo'), 'models') - - -def transform(): - return transforms.Compose([ - transforms.Resize((416, 416)), - transforms.ToTensor(), - ]) - -class YoloObjectDetectionServer(): - - def __init__(self): - - self.model_name = None - self.yolov4 = None - self.labels = [] - self.device = 'cpu' - - def load_model(self, model_name): - model_path = os.path.join(MODEL_ROOT, model_name) - - if os.path.isdir(model_path): - self.model_name = model_name - - start_time = time.time() - - try: - - with open(os.path.join(model_path, 'classes.txt')) as fp: - self.labels = fp.read().strip().splitlines() - - except FileNotFoundError: - rospy.logerr(f"Couldn't load {self.model_name}, 'classes.txt' does not exist in {model_path}.") - return False - - try: - - with open(os.path.join(model_path, 'yolov4.cfg')) as fp: - self.yolov4 = Darknet(os.path.join(model_path, 'yolov4.cfg')) - - except FileNotFoundError: - rospy.logerr(f"Couldn't load {self.model_name}, 'yolov4.cfg' does not exist in {model_path}.") - return False - - try: - self.yolov4.load_weights(os.path.join(model_path, 'yolov4.weights')) - - except FileNotFoundError: - rospy.logerr(f"Couldn't load {self.model_name}, 'yolov4.weights' does not exist in {model_path}.") - return False - - self.yolov4.eval() - """ - if torch.cuda.is_available(): - - # Initialise nvidia-smi - nvidia_smi.nvmlInit() - - # Assume a single GPU. - handle = nvidia_smi.nvmlDeviceGetHandleByIndex(0) - - # Get GPU memory info. - info = nvidia_smi.nvmlDeviceGetMemoryInfo(handle) - - if info.free / 1024**2 > 1024: - self.device = 'cuda' - - # Shutdown nvidia-smi - nvidia_smi.nvmlShutdown() - """ - self.device = "cpu" - - print(self.device) - self.yolov4.to(self.device) - - rospy.loginfo('Time to load {} model: {:.2f} seconds'.format(model_name, time.time() - start_time)) - - return True - - def detect(self, req): - - response = YoloDetectionResponse() - - # Only load model if it is not already loaded. - if not self.model_name == req.dataset: - if not self.load_model(req.dataset): - raise rospy.ServiceException(f"Couldn't load model '{req.dataset}'") - - # Random colours for bounding boxes. - np.random.seed(42) - COLOURS = np.random.randint(0, 255, size=(len(self.labels), 3), dtype="uint8") - - # Perform pre-processing. 
- frame = np.frombuffer(req.image_raw.data, dtype=np.uint8).reshape(req.image_raw.height, req.image_raw.width, -1) - image = PIL_Image.fromarray(frame) - image = torch.stack([transform()(image)]).to(self.device) - - outputs = self.yolov4(image) - - # net forward and non-mean suppression - #try: - #except RuntimeError: - # if self.device != 'cpu': - # self.device = 'cpu' - #self.yolov4.to(self.device) - # return self.detect(req) - # else: - # raise rospy.ServiceException("Couldn't use CUDA or CPU....") - outputs = post_processing(image, req.confidence, req.nms, outputs) - - if not outputs[0] is None: - for detection in outputs[0]: - - # Class ID of detection. - idx = np.argmax(detection[5:]) - - # Convert bounding box to image co-ordinates. - bbox = detection[:4] - bbox[0] *= req.image_raw.width - bbox[1] *= req.image_raw.height - bbox[2] *= req.image_raw.width - bbox[3] *= req.image_raw.height - x1,y1,x2,y2 = [int(i) for i in bbox] - - - obj_conf, class_score = detection[4:6] - class_pred = int(detection[6]) - - # Draw and label bounding box of object in the frame. - name = self.labels[class_pred] - confidence = class_score - x, y, w, h = x1, y1, x2 - x1, y2 - y1 - color = [int(c) for c in COLOURS[idx]] - cv2.rectangle(frame, (x, y), (x + w, y + h), color, 2) - text = "{}: {:.4f}".format(name, confidence) - cv2.putText(frame, text, (x, y - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2) - - # Check detection is above the confidence threshold. - if class_score > req.confidence: - xywh = [x1, y1, x2 - x1, y2 - y1] - - # Append detection. - response.detected_objects.append( - Detection( - name=name, - confidence=class_score, - xywh=xywh - ) - ) - print(response.detected_objects, 'i am in yolo detect ') - return response - -if __name__ == '__main__': - rospy.init_node('yolo_object_detection_server') - server = YoloObjectDetectionServer() - serv = rospy.Service('yolo_object_detection_server/detect_objects', YoloDetection, server.detect) - rospy.loginfo('YOLOv4 object detection service initialised') - rospy.spin() - - - diff --git a/legacy/lasr_object_detection_yolo/srv/YoloDetection.srv b/legacy/lasr_object_detection_yolo/srv/YoloDetection.srv deleted file mode 100644 index f23dbf29f..000000000 --- a/legacy/lasr_object_detection_yolo/srv/YoloDetection.srv +++ /dev/null @@ -1,6 +0,0 @@ -sensor_msgs/Image image_raw -string dataset -float32 confidence -float32 nms ---- -lasr_vision_msgs/Detection[] detected_objects \ No newline at end of file diff --git a/legacy/lasr_object_detection_yolo/train/.template/yolov4-tiny.cfg b/legacy/lasr_object_detection_yolo/train/.template/yolov4-tiny.cfg deleted file mode 100644 index dc6f5bfb8..000000000 --- a/legacy/lasr_object_detection_yolo/train/.template/yolov4-tiny.cfg +++ /dev/null @@ -1,281 +0,0 @@ -[net] -# Testing -#batch=1 -#subdivisions=1 -# Training -batch=64 -subdivisions=1 -width=416 -height=416 -channels=3 -momentum=0.9 -decay=0.0005 -angle=0 -saturation = 1.5 -exposure = 1.5 -hue=.1 - -learning_rate=0.00261 -burn_in=1000 -max_batches = 500200 -policy=steps -steps=400000,450000 -scales=.1,.1 - -[convolutional] -batch_normalize=1 -filters=32 -size=3 -stride=2 -pad=1 -activation=leaky - -[convolutional] -batch_normalize=1 -filters=64 -size=3 -stride=2 -pad=1 -activation=leaky - -[convolutional] -batch_normalize=1 -filters=64 -size=3 -stride=1 -pad=1 -activation=leaky - -[route] -layers=-1 -groups=2 -group_id=1 - -[convolutional] -batch_normalize=1 -filters=32 -size=3 -stride=1 -pad=1 -activation=leaky - -[convolutional] -batch_normalize=1 -filters=32 
-size=3 -stride=1 -pad=1 -activation=leaky - -[route] -layers = -1,-2 - -[convolutional] -batch_normalize=1 -filters=64 -size=1 -stride=1 -pad=1 -activation=leaky - -[route] -layers = -6,-1 - -[maxpool] -size=2 -stride=2 - -[convolutional] -batch_normalize=1 -filters=128 -size=3 -stride=1 -pad=1 -activation=leaky - -[route] -layers=-1 -groups=2 -group_id=1 - -[convolutional] -batch_normalize=1 -filters=64 -size=3 -stride=1 -pad=1 -activation=leaky - -[convolutional] -batch_normalize=1 -filters=64 -size=3 -stride=1 -pad=1 -activation=leaky - -[route] -layers = -1,-2 - -[convolutional] -batch_normalize=1 -filters=128 -size=1 -stride=1 -pad=1 -activation=leaky - -[route] -layers = -6,-1 - -[maxpool] -size=2 -stride=2 - -[convolutional] -batch_normalize=1 -filters=256 -size=3 -stride=1 -pad=1 -activation=leaky - -[route] -layers=-1 -groups=2 -group_id=1 - -[convolutional] -batch_normalize=1 -filters=128 -size=3 -stride=1 -pad=1 -activation=leaky - -[convolutional] -batch_normalize=1 -filters=128 -size=3 -stride=1 -pad=1 -activation=leaky - -[route] -layers = -1,-2 - -[convolutional] -batch_normalize=1 -filters=256 -size=1 -stride=1 -pad=1 -activation=leaky - -[route] -layers = -6,-1 - -[maxpool] -size=2 -stride=2 - -[convolutional] -batch_normalize=1 -filters=512 -size=3 -stride=1 -pad=1 -activation=leaky - -################################## - -[convolutional] -batch_normalize=1 -filters=256 -size=1 -stride=1 -pad=1 -activation=leaky - -[convolutional] -batch_normalize=1 -filters=512 -size=3 -stride=1 -pad=1 -activation=leaky - -[convolutional] -size=1 -stride=1 -pad=1 -filters=255 -activation=linear - - - -[yolo] -mask = 3,4,5 -anchors = 10,14, 23,27, 37,58, 81,82, 135,169, 344,319 -classes=80 -num=6 -jitter=.3 -scale_x_y = 1.05 -cls_normalizer=1.0 -iou_normalizer=0.07 -iou_loss=ciou -ignore_thresh = .7 -truth_thresh = 1 -random=0 -resize=1.5 -nms_kind=greedynms -beta_nms=0.6 - -[route] -layers = -4 - -[convolutional] -batch_normalize=1 -filters=128 -size=1 -stride=1 -pad=1 -activation=leaky - -[upsample] -stride=2 - -[route] -layers = -1, 23 - -[convolutional] -batch_normalize=1 -filters=256 -size=3 -stride=1 -pad=1 -activation=leaky - -[convolutional] -size=1 -stride=1 -pad=1 -filters=255 -activation=linear - -[yolo] -mask = 1,2,3 -anchors = 10,14, 23,27, 37,58, 81,82, 135,169, 344,319 -classes=80 -num=6 -jitter=.3 -scale_x_y = 1.05 -cls_normalizer=1.0 -iou_normalizer=0.07 -iou_loss=ciou -ignore_thresh = .7 -truth_thresh = 1 -random=0 -resize=1.5 -nms_kind=greedynms -beta_nms=0.6 diff --git a/legacy/lasr_object_detection_yolo/train/.template/yolov4.cfg b/legacy/lasr_object_detection_yolo/train/.template/yolov4.cfg deleted file mode 100644 index f43259bf7..000000000 --- a/legacy/lasr_object_detection_yolo/train/.template/yolov4.cfg +++ /dev/null @@ -1,1157 +0,0 @@ -[net] -batch=64 -subdivisions=8 -# Training -#width=512 -#height=512 -width=608 -height=608 -channels=3 -momentum=0.949 -decay=0.0005 -angle=0 -saturation = 1.5 -exposure = 1.5 -hue=.1 - -learning_rate=0.0013 -burn_in=1000 -max_batches = 500500 -policy=steps -steps=400000,450000 -scales=.1,.1 - -#cutmix=1 -mosaic=0 - -#:104x104 54:52x52 85:26x26 104:13x13 for 416 - -[convolutional] -batch_normalize=1 -filters=32 -size=3 -stride=1 -pad=1 -activation=mish - -# Downsample - -[convolutional] -batch_normalize=1 -filters=64 -size=3 -stride=2 -pad=1 -activation=mish - -[convolutional] -batch_normalize=1 -filters=64 -size=1 -stride=1 -pad=1 -activation=mish - -[route] -layers = -2 - -[convolutional] -batch_normalize=1 -filters=64 -size=1 
-stride=1 -pad=1 -activation=mish - -[convolutional] -batch_normalize=1 -filters=32 -size=1 -stride=1 -pad=1 -activation=mish - -[convolutional] -batch_normalize=1 -filters=64 -size=3 -stride=1 -pad=1 -activation=mish - -[shortcut] -from=-3 -activation=linear - -[convolutional] -batch_normalize=1 -filters=64 -size=1 -stride=1 -pad=1 -activation=mish - -[route] -layers = -1,-7 - -[convolutional] -batch_normalize=1 -filters=64 -size=1 -stride=1 -pad=1 -activation=mish - -# Downsample - -[convolutional] -batch_normalize=1 -filters=128 -size=3 -stride=2 -pad=1 -activation=mish - -[convolutional] -batch_normalize=1 -filters=64 -size=1 -stride=1 -pad=1 -activation=mish - -[route] -layers = -2 - -[convolutional] -batch_normalize=1 -filters=64 -size=1 -stride=1 -pad=1 -activation=mish - -[convolutional] -batch_normalize=1 -filters=64 -size=1 -stride=1 -pad=1 -activation=mish - -[convolutional] -batch_normalize=1 -filters=64 -size=3 -stride=1 -pad=1 -activation=mish - -[shortcut] -from=-3 -activation=linear - -[convolutional] -batch_normalize=1 -filters=64 -size=1 -stride=1 -pad=1 -activation=mish - -[convolutional] -batch_normalize=1 -filters=64 -size=3 -stride=1 -pad=1 -activation=mish - -[shortcut] -from=-3 -activation=linear - -[convolutional] -batch_normalize=1 -filters=64 -size=1 -stride=1 -pad=1 -activation=mish - -[route] -layers = -1,-10 - -[convolutional] -batch_normalize=1 -filters=128 -size=1 -stride=1 -pad=1 -activation=mish - -# Downsample - -[convolutional] -batch_normalize=1 -filters=256 -size=3 -stride=2 -pad=1 -activation=mish - -[convolutional] -batch_normalize=1 -filters=128 -size=1 -stride=1 -pad=1 -activation=mish - -[route] -layers = -2 - -[convolutional] -batch_normalize=1 -filters=128 -size=1 -stride=1 -pad=1 -activation=mish - -[convolutional] -batch_normalize=1 -filters=128 -size=1 -stride=1 -pad=1 -activation=mish - -[convolutional] -batch_normalize=1 -filters=128 -size=3 -stride=1 -pad=1 -activation=mish - -[shortcut] -from=-3 -activation=linear - -[convolutional] -batch_normalize=1 -filters=128 -size=1 -stride=1 -pad=1 -activation=mish - -[convolutional] -batch_normalize=1 -filters=128 -size=3 -stride=1 -pad=1 -activation=mish - -[shortcut] -from=-3 -activation=linear - -[convolutional] -batch_normalize=1 -filters=128 -size=1 -stride=1 -pad=1 -activation=mish - -[convolutional] -batch_normalize=1 -filters=128 -size=3 -stride=1 -pad=1 -activation=mish - -[shortcut] -from=-3 -activation=linear - -[convolutional] -batch_normalize=1 -filters=128 -size=1 -stride=1 -pad=1 -activation=mish - -[convolutional] -batch_normalize=1 -filters=128 -size=3 -stride=1 -pad=1 -activation=mish - -[shortcut] -from=-3 -activation=linear - - -[convolutional] -batch_normalize=1 -filters=128 -size=1 -stride=1 -pad=1 -activation=mish - -[convolutional] -batch_normalize=1 -filters=128 -size=3 -stride=1 -pad=1 -activation=mish - -[shortcut] -from=-3 -activation=linear - -[convolutional] -batch_normalize=1 -filters=128 -size=1 -stride=1 -pad=1 -activation=mish - -[convolutional] -batch_normalize=1 -filters=128 -size=3 -stride=1 -pad=1 -activation=mish - -[shortcut] -from=-3 -activation=linear - -[convolutional] -batch_normalize=1 -filters=128 -size=1 -stride=1 -pad=1 -activation=mish - -[convolutional] -batch_normalize=1 -filters=128 -size=3 -stride=1 -pad=1 -activation=mish - -[shortcut] -from=-3 -activation=linear - -[convolutional] -batch_normalize=1 -filters=128 -size=1 -stride=1 -pad=1 -activation=mish - -[convolutional] -batch_normalize=1 -filters=128 -size=3 -stride=1 -pad=1 
-activation=mish - -[shortcut] -from=-3 -activation=linear - -[convolutional] -batch_normalize=1 -filters=128 -size=1 -stride=1 -pad=1 -activation=mish - -[route] -layers = -1,-28 - -[convolutional] -batch_normalize=1 -filters=256 -size=1 -stride=1 -pad=1 -activation=mish - -# Downsample - -[convolutional] -batch_normalize=1 -filters=512 -size=3 -stride=2 -pad=1 -activation=mish - -[convolutional] -batch_normalize=1 -filters=256 -size=1 -stride=1 -pad=1 -activation=mish - -[route] -layers = -2 - -[convolutional] -batch_normalize=1 -filters=256 -size=1 -stride=1 -pad=1 -activation=mish - -[convolutional] -batch_normalize=1 -filters=256 -size=1 -stride=1 -pad=1 -activation=mish - -[convolutional] -batch_normalize=1 -filters=256 -size=3 -stride=1 -pad=1 -activation=mish - -[shortcut] -from=-3 -activation=linear - - -[convolutional] -batch_normalize=1 -filters=256 -size=1 -stride=1 -pad=1 -activation=mish - -[convolutional] -batch_normalize=1 -filters=256 -size=3 -stride=1 -pad=1 -activation=mish - -[shortcut] -from=-3 -activation=linear - - -[convolutional] -batch_normalize=1 -filters=256 -size=1 -stride=1 -pad=1 -activation=mish - -[convolutional] -batch_normalize=1 -filters=256 -size=3 -stride=1 -pad=1 -activation=mish - -[shortcut] -from=-3 -activation=linear - - -[convolutional] -batch_normalize=1 -filters=256 -size=1 -stride=1 -pad=1 -activation=mish - -[convolutional] -batch_normalize=1 -filters=256 -size=3 -stride=1 -pad=1 -activation=mish - -[shortcut] -from=-3 -activation=linear - - -[convolutional] -batch_normalize=1 -filters=256 -size=1 -stride=1 -pad=1 -activation=mish - -[convolutional] -batch_normalize=1 -filters=256 -size=3 -stride=1 -pad=1 -activation=mish - -[shortcut] -from=-3 -activation=linear - - -[convolutional] -batch_normalize=1 -filters=256 -size=1 -stride=1 -pad=1 -activation=mish - -[convolutional] -batch_normalize=1 -filters=256 -size=3 -stride=1 -pad=1 -activation=mish - -[shortcut] -from=-3 -activation=linear - - -[convolutional] -batch_normalize=1 -filters=256 -size=1 -stride=1 -pad=1 -activation=mish - -[convolutional] -batch_normalize=1 -filters=256 -size=3 -stride=1 -pad=1 -activation=mish - -[shortcut] -from=-3 -activation=linear - -[convolutional] -batch_normalize=1 -filters=256 -size=1 -stride=1 -pad=1 -activation=mish - -[convolutional] -batch_normalize=1 -filters=256 -size=3 -stride=1 -pad=1 -activation=mish - -[shortcut] -from=-3 -activation=linear - -[convolutional] -batch_normalize=1 -filters=256 -size=1 -stride=1 -pad=1 -activation=mish - -[route] -layers = -1,-28 - -[convolutional] -batch_normalize=1 -filters=512 -size=1 -stride=1 -pad=1 -activation=mish - -# Downsample - -[convolutional] -batch_normalize=1 -filters=1024 -size=3 -stride=2 -pad=1 -activation=mish - -[convolutional] -batch_normalize=1 -filters=512 -size=1 -stride=1 -pad=1 -activation=mish - -[route] -layers = -2 - -[convolutional] -batch_normalize=1 -filters=512 -size=1 -stride=1 -pad=1 -activation=mish - -[convolutional] -batch_normalize=1 -filters=512 -size=1 -stride=1 -pad=1 -activation=mish - -[convolutional] -batch_normalize=1 -filters=512 -size=3 -stride=1 -pad=1 -activation=mish - -[shortcut] -from=-3 -activation=linear - -[convolutional] -batch_normalize=1 -filters=512 -size=1 -stride=1 -pad=1 -activation=mish - -[convolutional] -batch_normalize=1 -filters=512 -size=3 -stride=1 -pad=1 -activation=mish - -[shortcut] -from=-3 -activation=linear - -[convolutional] -batch_normalize=1 -filters=512 -size=1 -stride=1 -pad=1 -activation=mish - -[convolutional] -batch_normalize=1 
-filters=512 -size=3 -stride=1 -pad=1 -activation=mish - -[shortcut] -from=-3 -activation=linear - -[convolutional] -batch_normalize=1 -filters=512 -size=1 -stride=1 -pad=1 -activation=mish - -[convolutional] -batch_normalize=1 -filters=512 -size=3 -stride=1 -pad=1 -activation=mish - -[shortcut] -from=-3 -activation=linear - -[convolutional] -batch_normalize=1 -filters=512 -size=1 -stride=1 -pad=1 -activation=mish - -[route] -layers = -1,-16 - -[convolutional] -batch_normalize=1 -filters=1024 -size=1 -stride=1 -pad=1 -activation=mish - -########################## - -[convolutional] -batch_normalize=1 -filters=512 -size=1 -stride=1 -pad=1 -activation=leaky - -[convolutional] -batch_normalize=1 -size=3 -stride=1 -pad=1 -filters=1024 -activation=leaky - -[convolutional] -batch_normalize=1 -filters=512 -size=1 -stride=1 -pad=1 -activation=leaky - -### SPP ### -[maxpool] -stride=1 -size=5 - -[route] -layers=-2 - -[maxpool] -stride=1 -size=9 - -[route] -layers=-4 - -[maxpool] -stride=1 -size=13 - -[route] -layers=-1,-3,-5,-6 -### End SPP ### - -[convolutional] -batch_normalize=1 -filters=512 -size=1 -stride=1 -pad=1 -activation=leaky - -[convolutional] -batch_normalize=1 -size=3 -stride=1 -pad=1 -filters=1024 -activation=leaky - -[convolutional] -batch_normalize=1 -filters=512 -size=1 -stride=1 -pad=1 -activation=leaky - -[convolutional] -batch_normalize=1 -filters=256 -size=1 -stride=1 -pad=1 -activation=leaky - -[upsample] -stride=2 - -[route] -layers = 85 - -[convolutional] -batch_normalize=1 -filters=256 -size=1 -stride=1 -pad=1 -activation=leaky - -[route] -layers = -1, -3 - -[convolutional] -batch_normalize=1 -filters=256 -size=1 -stride=1 -pad=1 -activation=leaky - -[convolutional] -batch_normalize=1 -size=3 -stride=1 -pad=1 -filters=512 -activation=leaky - -[convolutional] -batch_normalize=1 -filters=256 -size=1 -stride=1 -pad=1 -activation=leaky - -[convolutional] -batch_normalize=1 -size=3 -stride=1 -pad=1 -filters=512 -activation=leaky - -[convolutional] -batch_normalize=1 -filters=256 -size=1 -stride=1 -pad=1 -activation=leaky - -[convolutional] -batch_normalize=1 -filters=128 -size=1 -stride=1 -pad=1 -activation=leaky - -[upsample] -stride=2 - -[route] -layers = 54 - -[convolutional] -batch_normalize=1 -filters=128 -size=1 -stride=1 -pad=1 -activation=leaky - -[route] -layers = -1, -3 - -[convolutional] -batch_normalize=1 -filters=128 -size=1 -stride=1 -pad=1 -activation=leaky - -[convolutional] -batch_normalize=1 -size=3 -stride=1 -pad=1 -filters=256 -activation=leaky - -[convolutional] -batch_normalize=1 -filters=128 -size=1 -stride=1 -pad=1 -activation=leaky - -[convolutional] -batch_normalize=1 -size=3 -stride=1 -pad=1 -filters=256 -activation=leaky - -[convolutional] -batch_normalize=1 -filters=128 -size=1 -stride=1 -pad=1 -activation=leaky - -########################## - -[convolutional] -batch_normalize=1 -size=3 -stride=1 -pad=1 -filters=256 -activation=leaky - -[convolutional] -size=1 -stride=1 -pad=1 -filters=255 -activation=linear - - -[yolo] -mask = 0,1,2 -anchors = 12, 16, 19, 36, 40, 28, 36, 75, 76, 55, 72, 146, 142, 110, 192, 243, 459, 401 -classes=80 -num=9 -jitter=.3 -ignore_thresh = .7 -truth_thresh = 1 -scale_x_y = 1.2 -iou_thresh=0.213 -cls_normalizer=1.0 -iou_normalizer=0.07 -iou_loss=ciou -nms_kind=greedynms -beta_nms=0.6 -max_delta=5 - - -[route] -layers = -4 - -[convolutional] -batch_normalize=1 -size=3 -stride=2 -pad=1 -filters=256 -activation=leaky - -[route] -layers = -1, -16 - -[convolutional] -batch_normalize=1 -filters=256 -size=1 -stride=1 -pad=1 
-activation=leaky - -[convolutional] -batch_normalize=1 -size=3 -stride=1 -pad=1 -filters=512 -activation=leaky - -[convolutional] -batch_normalize=1 -filters=256 -size=1 -stride=1 -pad=1 -activation=leaky - -[convolutional] -batch_normalize=1 -size=3 -stride=1 -pad=1 -filters=512 -activation=leaky - -[convolutional] -batch_normalize=1 -filters=256 -size=1 -stride=1 -pad=1 -activation=leaky - -[convolutional] -batch_normalize=1 -size=3 -stride=1 -pad=1 -filters=512 -activation=leaky - -[convolutional] -size=1 -stride=1 -pad=1 -filters=255 -activation=linear - - -[yolo] -mask = 3,4,5 -anchors = 12, 16, 19, 36, 40, 28, 36, 75, 76, 55, 72, 146, 142, 110, 192, 243, 459, 401 -classes=80 -num=9 -jitter=.3 -ignore_thresh = .7 -truth_thresh = 1 -scale_x_y = 1.1 -iou_thresh=0.213 -cls_normalizer=1.0 -iou_normalizer=0.07 -iou_loss=ciou -nms_kind=greedynms -beta_nms=0.6 -max_delta=5 - - -[route] -layers = -4 - -[convolutional] -batch_normalize=1 -size=3 -stride=2 -pad=1 -filters=512 -activation=leaky - -[route] -layers = -1, -37 - -[convolutional] -batch_normalize=1 -filters=512 -size=1 -stride=1 -pad=1 -activation=leaky - -[convolutional] -batch_normalize=1 -size=3 -stride=1 -pad=1 -filters=1024 -activation=leaky - -[convolutional] -batch_normalize=1 -filters=512 -size=1 -stride=1 -pad=1 -activation=leaky - -[convolutional] -batch_normalize=1 -size=3 -stride=1 -pad=1 -filters=1024 -activation=leaky - -[convolutional] -batch_normalize=1 -filters=512 -size=1 -stride=1 -pad=1 -activation=leaky - -[convolutional] -batch_normalize=1 -size=3 -stride=1 -pad=1 -filters=1024 -activation=leaky - -[convolutional] -size=1 -stride=1 -pad=1 -filters=255 -activation=linear - - -[yolo] -mask = 6,7,8 -anchors = 12, 16, 19, 36, 40, 28, 36, 75, 76, 55, 72, 146, 142, 110, 192, 243, 459, 401 -classes=80 -num=9 -jitter=.3 -ignore_thresh = .7 -truth_thresh = 1 -random=1 -scale_x_y = 1.05 -iou_thresh=0.213 -cls_normalizer=1.0 -iou_normalizer=0.07 -iou_loss=ciou -nms_kind=greedynms -beta_nms=0.6 -max_delta=5 \ No newline at end of file diff --git a/legacy/lasr_object_detection_yolo/train/Dockerfile b/legacy/lasr_object_detection_yolo/train/Dockerfile deleted file mode 100644 index a53eb4553..000000000 --- a/legacy/lasr_object_detection_yolo/train/Dockerfile +++ /dev/null @@ -1,14 +0,0 @@ -FROM python:3 as builder - -COPY . . -ENV PATH $PATH:/scripts - -ENV USE_GPU 0 -ENV USE_OPENCV 0 -ENV USE_OPENMP 0 -ENV USE_CUDNN 0 - -RUN mkdir datasets config weights -RUN build_darknet.sh - -ENTRYPOINT ["train.sh"] \ No newline at end of file diff --git a/legacy/lasr_object_detection_yolo/train/README.md b/legacy/lasr_object_detection_yolo/train/README.md deleted file mode 100644 index bcfca6a79..000000000 --- a/legacy/lasr_object_detection_yolo/train/README.md +++ /dev/null @@ -1,25 +0,0 @@ -## Train YOLOv4 - -For generating darknet config files and training YOLOv4. - -# Instructions - -`docker build -t darknet .` - -Mount the your data to /dataset/data: - -`docker run -v data:/datasets/data darknet` - -To run with GPU / CUDNN / OPENCV / OPENMP: - -`docker run -e USE_GPU=1 -v data:/datasets/data darknet` - -# Details - -**build_darknet.sh** builds darknet from source, modify this to change compilation options. - -**train.sh** for training YOLOv4. Usage is ./train.sh DATASET, where DATASET is a directory name in **datasets** (you must create this root directory). - -If you encounter issues with darknet, you may want to alter the **batch_size** and **subdivisions**. - -Run outside of ROS environment. 
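The deleted training README above covers the Docker workflow but not how the per-dataset config files are derived; the generate_config_files.py script removed further below rewrites the yolov4(-tiny).cfg templates from the dataset's class count. A minimal sketch of that arithmetic, with a hypothetical class count (an illustration of the rules the script applies, not the script itself):

```python
# Sketch of the darknet .cfg arithmetic applied by the deleted config generator.
# The relationships (filters, max_batches, steps) follow the script below; the
# example class count is hypothetical.

def yolo_cfg_params(num_classes: int) -> dict:
    """Fields rewritten in yolov4(-tiny).cfg for a custom dataset."""
    max_batches = num_classes * 2000
    return {
        "classes": num_classes,                    # set in every [yolo] section
        "filters": (num_classes + 5) * 3,          # conv layer feeding each [yolo] section
        "max_batches": max_batches,
        "steps": (max_batches * 8 // 10, max_batches * 9 // 10),  # LR decay at 80% / 90%
    }

if __name__ == "__main__":
    print(yolo_cfg_params(3))
    # -> {'classes': 3, 'filters': 24, 'max_batches': 6000, 'steps': (4800, 5400)}
```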
\ No newline at end of file diff --git a/legacy/lasr_object_detection_yolo/train/scripts/build_darknet.sh b/legacy/lasr_object_detection_yolo/train/scripts/build_darknet.sh deleted file mode 100755 index 96694a266..000000000 --- a/legacy/lasr_object_detection_yolo/train/scripts/build_darknet.sh +++ /dev/null @@ -1,4 +0,0 @@ -#!/bin/bash -git clone https://github.com/AlexeyAB/darknet -make -C darknet GPU=$USE_GPU OPENCV=$USE_OPENCV CUDNN=$USE_CUDNN OPENMP=$USE_OPENMP -wget -P pretrained_weights https://github.com/AlexeyAB/darknet/releases/download/darknet_yolo_v3_optimal/yolov4.conv.137 \ No newline at end of file diff --git a/legacy/lasr_object_detection_yolo/train/scripts/generate_config_files.py b/legacy/lasr_object_detection_yolo/train/scripts/generate_config_files.py deleted file mode 100644 index 39304b059..000000000 --- a/legacy/lasr_object_detection_yolo/train/scripts/generate_config_files.py +++ /dev/null @@ -1,123 +0,0 @@ -import os -import sys -import random - -# validate input -if len(sys.argv) < 2 or len(sys.argv) > 3: - print('usage: python makeTestFile.py' + \ - ' [full path to lasr_darknet_config for use on different system]') - sys.exit() - -# get dataset name -dataset = sys.argv[1] - -# lasr_darknet_config path - -lasr_path = os.path.dirname(os.path.realpath(__file__)) if not os.path.exists("/.dockerenv") else "" - -# folder paths -cfg_path = lasr_path + '/config/' + dataset -img_path = lasr_path + '/datasets/' + dataset -bkp_path = lasr_path + '/weights/' + dataset - -# file paths -data_path = cfg_path + '/' + dataset + '.data' -train_path = cfg_path + '/train_' + dataset + '.txt' -valid_path = cfg_path + '/valid_' + dataset + '.txt' -names_path = img_path + '/classes.txt' -yv4_path = cfg_path + '/yolov4.cfg' -yv4tiny_path = cfg_path + '/yolov4-tiny.cfg' -yv4tiny_tplt_path = lasr_path + '/.template/yolov4-tiny.cfg' -yv4_tplt_path = lasr_path + '/.template/yolov4.cfg' - -# validate paths -# write something here!!!! 
- -# create dirs -if not os.path.exists(cfg_path): - os.makedirs(cfg_path) -if not os.path.exists(bkp_path): - os.makedirs(bkp_path) - -# get number of classes -classes = 0 -with open(names_path) as f: - for line in f: - classes += 1 - -# SET GEN PATHS -if len(sys.argv) == 3: - gen_path = sys.argv[2] -else: - gen_path = lasr_path - -# set gen paths -cfg_gen_path = gen_path + '/config/' + dataset -img_gen_path = gen_path + '/datasets/' + dataset -bkp_gen_path = gen_path + '/weights/' + dataset -train_gen_path = cfg_gen_path + '/train_' + dataset + '.txt' -valid_gen_path = cfg_gen_path + '/valid_' + dataset + '.txt' -names_gen_path = img_gen_path + '/classes.txt' - -# --- CREATE TRAIN AND VALID FILES --- # -train_file = open(train_path, 'w') -valid_file = open(valid_path, 'w') - -# find jpgs in "datasets" folder and add all paths to train and valid lists -for root, dirs, files in os.walk(img_path): - for filename in files: - if filename.endswith('.jpg'): - text_out = img_gen_path + '/' + filename + '\n' - # one in ten files is a validation image - if (random.randint(-1, 10)): - train_file.write(text_out) - else: - valid_file.write(text_out) - -train_file.close() -valid_file.close() - - - -# --- CREATE DATA FILE --- # -data_file = open(data_path, 'w') -data_file.write('classes = ' + str(classes) + '\n') -data_file.write('train = ' + train_gen_path + '\n') -data_file.write('valid = ' + valid_gen_path + '\n') -data_file.write('names = ' + names_gen_path + '\n') -data_file.write('backup = ' + bkp_gen_path + '\n') - - - -# --- CREATE YOLOV4 TINY AND YOLOV4 CFG FILES --- # -cfg_file = [open(yv4tiny_path, 'w'), open(yv4_path, 'w')] -templates = [yv4tiny_tplt_path, yv4_tplt_path] - -max_batches = classes * 2000 -filters = (classes + 5) * 3 - -for i in range(2): - filters_lines = [] - filters_last = 0 - - # first pass - find filters lines to be replaced - with open(templates[i]) as f: - for lineno, line in enumerate(f): - if line.startswith('filters'): - filters_last = lineno - elif line.startswith('[yolo]'): - filters_lines.append(filters_last) - - # second pass - copy lines to new cfg with replacement - with open(templates[i]) as f: - for lineno, line in enumerate(f): - if lineno in filters_lines: - cfg_file[i].write('filters=' + str(filters) + '\n') - elif line.startswith('classes'): - cfg_file[i].write('classes=' + str(classes) + '\n') - elif line.startswith('max_batches'): - cfg_file[i].write('max_batches=' + str(max_batches) + '\n') - elif line.startswith('steps='): - cfg_file[i].write('steps=' + str((max_batches / 10) * 8) + ',' + str((max_batches / 10) * 9) + '\n') - else: - cfg_file[i].write(line) diff --git a/legacy/lasr_object_detection_yolo/train/scripts/train.sh b/legacy/lasr_object_detection_yolo/train/scripts/train.sh deleted file mode 100755 index 4d1d0e62a..000000000 --- a/legacy/lasr_object_detection_yolo/train/scripts/train.sh +++ /dev/null @@ -1,4 +0,0 @@ -#!/bin/bash -DATASET=$1 -python scripts/generate_config_files.py $DATASET -./darknet/darknet detector train config/$DATASET/$DATASET.data config/$DATASET/yolov4.cfg pretrained_weights/yolov4.conv.137 \ No newline at end of file diff --git a/legacy/lasr_object_detection_yolo/src/lasr_object_detection_yolo/__init__.py b/legacy/lasr_shapely/CATKIN_IGNORE similarity index 100% rename from legacy/lasr_object_detection_yolo/src/lasr_object_detection_yolo/__init__.py rename to legacy/lasr_shapely/CATKIN_IGNORE diff --git a/legacy/lasr_web_server/README.md b/legacy/lasr_web_server/README.md deleted file mode 100644 index 
4f19e002f..000000000 --- a/legacy/lasr_web_server/README.md +++ /dev/null @@ -1,63 +0,0 @@ -# lasr_web_server - -The lasr_web_server package - -This package is maintained by: -- [jared](mailto:jared@todo.todo) - -## Prerequisites - -This package depends on the following ROS packages: -- catkin (buildtool) - -- [rosbridge_suite](http://wiki.ros.org/rosbridge_suite) -- [web_video_server](http://wiki.ros.org/web_video_server) - -## Usage - -The web server will be typically run alongside the entire Interaction stack (see the Interaction README). -However, if you wish to run in isolation, run the following: - -`roslaunch lasr_web_server web_server.launch` - -## Example - -Ask the package maintainer to write a `doc/EXAMPLE.md` for their package! - -## Technical Overview - -The following topics are maintained by the web server: - -| Topic | Message Type | Functionality | -| ------------------------------- | --------------- | ---------------------------------------------------------------------- | -| /lasr_web_server/set_input_text | std_msgs/String | Publish to this topic to set the placeholder text of the input box. | -| /lasr_web_server/text_input | std_msgs/String | Publishes submitted user input. Subscribe to this topic to receive it. | - -The web server is built using the following technologies: - -- [roslibjs](http://wiki.ros.org/roslibjs) -- [rosbridge](http://wiki.ros.org/rosbridge_suite) -- websocket -- [Flask](https://flask.palletsprojects.com/en/2.2.x/) -- Bootstrap, HTML, CSS, JavaScript - -## ROS Definitions - -### Launch Files - -#### `web_server` - -No description provided. - - -### Messages - -This package has no messages. - -### Services - -This package has no services. - -### Actions - -This package has no actions. diff --git a/legacy/lasr_web_server/doc/PREREQUISITES.md b/legacy/lasr_web_server/doc/PREREQUISITES.md deleted file mode 100644 index 4d3efc7b7..000000000 --- a/legacy/lasr_web_server/doc/PREREQUISITES.md +++ /dev/null @@ -1,2 +0,0 @@ -- [rosbridge_suite](http://wiki.ros.org/rosbridge_suite) -- [web_video_server](http://wiki.ros.org/web_video_server) diff --git a/legacy/lasr_web_server/doc/TECHNICAL.md b/legacy/lasr_web_server/doc/TECHNICAL.md deleted file mode 100644 index b5ad815d5..000000000 --- a/legacy/lasr_web_server/doc/TECHNICAL.md +++ /dev/null @@ -1,14 +0,0 @@ -The following topics are maintained by the web server: - -| Topic | Message Type | Functionality | -| ------------------------------- | --------------- | ---------------------------------------------------------------------- | -| /lasr_web_server/set_input_text | std_msgs/String | Publish to this topic to set the placeholder text of the input box. | -| /lasr_web_server/text_input | std_msgs/String | Publishes submitted user input. Subscribe to this topic to receive it. | - -The web server is built using the following technologies: - -- [roslibjs](http://wiki.ros.org/roslibjs) -- [rosbridge](http://wiki.ros.org/rosbridge_suite) -- websocket -- [Flask](https://flask.palletsprojects.com/en/2.2.x/) -- Bootstrap, HTML, CSS, JavaScript diff --git a/legacy/lasr_web_server/doc/USAGE.md b/legacy/lasr_web_server/doc/USAGE.md deleted file mode 100644 index cc5c957f1..000000000 --- a/legacy/lasr_web_server/doc/USAGE.md +++ /dev/null @@ -1,4 +0,0 @@ -The web server will be typically run alongside the entire Interaction stack (see the Interaction README). 
-However, if you wish to run in isolation, run the following: - -`roslaunch lasr_web_server web_server.launch` diff --git a/legacy/lasr_web_server/launch/web_server.launch b/legacy/lasr_web_server/launch/web_server.launch deleted file mode 100644 index 18566d5ad..000000000 --- a/legacy/lasr_web_server/launch/web_server.launch +++ /dev/null @@ -1,8 +0,0 @@ - - - - - - - - \ No newline at end of file diff --git a/legacy/lasr_web_server/package.xml b/legacy/lasr_web_server/package.xml deleted file mode 100644 index 53f2194ab..000000000 --- a/legacy/lasr_web_server/package.xml +++ /dev/null @@ -1,59 +0,0 @@ - - - lasr_web_server - 0.0.0 - The lasr_web_server package - - - - - jared - - - - - - TODO - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - catkin - - - - - - - - diff --git a/legacy/lasr_web_server/setup.py b/legacy/lasr_web_server/setup.py deleted file mode 100644 index dedbe18df..000000000 --- a/legacy/lasr_web_server/setup.py +++ /dev/null @@ -1,12 +0,0 @@ - -#!/usr/bin/env python3 - -from distutils.core import setup -from catkin_pkg.python_setup import generate_distutils_setup - -setup_args = generate_distutils_setup( - packages=['lasr_web_server'], - package_dir={'': 'src'} -) - -setup(**setup_args) \ No newline at end of file diff --git a/legacy/lasr_web_server/src/lasr_web_server/static/styles/styles.css b/legacy/lasr_web_server/src/lasr_web_server/static/styles/styles.css deleted file mode 100644 index 170ad0afc..000000000 --- a/legacy/lasr_web_server/src/lasr_web_server/static/styles/styles.css +++ /dev/null @@ -1,39 +0,0 @@ - /* Remove default bullets */ - ul, #myUL { - list-style-type: none; - } - - /* Remove margins and padding from the parent ul */ - #myUL { - margin: 0; - padding: 0; - } - - /* Style the caret/arrow */ - .caret { - cursor: pointer; - user-select: none; /* Prevent text selection */ - } - - /* Create the caret/arrow with a unicode, and style it */ - .caret::before { - content: "\25B6"; - color: black; - display: inline-block; - margin-right: 6px; - } - - /* Rotate the caret/arrow icon when clicked on (using JavaScript) */ - .caret-down::before { - transform: rotate(90deg); - } - - /* Hide the nested list */ - .nested { - display: none; - } - - /* Show the nested list when the user clicks on the caret/arrow (with JavaScript) */ - .active { - display: block; - } \ No newline at end of file diff --git a/legacy/lasr_web_server/src/lasr_web_server/templates/main.html b/legacy/lasr_web_server/src/lasr_web_server/templates/main.html deleted file mode 100644 index a28c3df74..000000000 --- a/legacy/lasr_web_server/src/lasr_web_server/templates/main.html +++ /dev/null @@ -1,289 +0,0 @@ - - - - TIAGO Website - - - - - - - - - - - - - -
[main.html: the deleted template's HTML markup was lost in extraction; only its rendered text survives. The page was titled "TIAGo Webpage" ("Provides text-based interaction as well as diagnostics and logs.") and contained sections for Topic Information (Robot Pose position/orientation, Clock, RGB Image), Text Interaction, Conversation History, and Logs.]
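For context on the interface the deleted lasr_web_server docs describe: both topics are plain std_msgs/String. A minimal rospy sketch of another node driving them might look like the following (node name and callback are illustrative, not part of the removed package):

```python
#!/usr/bin/env python3
# Illustrative client for the removed lasr_web_server topics.
# Topic names/types come from the deleted README; everything else is hypothetical.
import rospy
from std_msgs.msg import String

def on_text_input(msg: String) -> None:
    # Called whenever the user submits text through the web page.
    rospy.loginfo("received from web UI: %s", msg.data)

if __name__ == "__main__":
    rospy.init_node("web_server_client_example")
    set_placeholder = rospy.Publisher("/lasr_web_server/set_input_text", String, queue_size=1)
    rospy.Subscriber("/lasr_web_server/text_input", String, on_text_input)
    rospy.sleep(1.0)  # allow the publisher connection to establish
    set_placeholder.publish(String(data="Type a command for TIAGo..."))
    rospy.spin()
```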
- - diff --git a/legacy/lasr_web_server/src/lasr_web_server/web_server.py b/legacy/lasr_web_server/src/lasr_web_server/web_server.py deleted file mode 100755 index 16e05d1d7..000000000 --- a/legacy/lasr_web_server/src/lasr_web_server/web_server.py +++ /dev/null @@ -1,22 +0,0 @@ -#!/usr/bin/env python3 - -import rospy -import rospkg -from flask import Flask, render_template -from flask_socketio import SocketIO, emit - -import os - -TEMPLATE_ROOT = os.path.join(rospkg.RosPack().get_path("lasr_web_server"), "src", "lasr_web_server", "templates") -HOST = '0.0.0.0' -PORT = 4000 - -app = Flask(__name__, template_folder = TEMPLATE_ROOT) - -@app.route('/') -def render_main(): - return render_template("main.html") - - -if __name__ == "__main__": - app.run(host=HOST, port=PORT, debug=True) \ No newline at end of file diff --git a/legacy/listen_module/CMakeLists.txt b/legacy/listen_module/CMakeLists.txt deleted file mode 100644 index a7e36a2ee..000000000 --- a/legacy/listen_module/CMakeLists.txt +++ /dev/null @@ -1,207 +0,0 @@ -cmake_minimum_required(VERSION 3.0.2) -project(listen_module) - -## Compile as C++11, supported in ROS Kinetic and newer -# add_compile_options(-std=c++11) - -## Find catkin macros and libraries -## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz) -## is used, also find other catkin packages -find_package(catkin REQUIRED COMPONENTS - geometry_msgs - roscpp - rospy - std_msgs - message_generation -) - -## System dependencies are found with CMake's conventions -# find_package(Boost REQUIRED COMPONENTS system) - - -## Uncomment this if the package has a setup.py. This macro ensures -## modules and global scripts declared therein get installed -## See http://ros.org/doc/api/catkin/html/user_guide/setup_dot_py.html - catkin_python_setup() - -################################################ -## Declare ROS messages, services and actions ## -################################################ - -## To declare and build messages, services or actions from within this -## package, follow these steps: -## * Let MSG_DEP_SET be the set of packages whose message types you use in -## your messages/services/actions (e.g. std_msgs, actionlib_msgs, ...). -## * In the file package.xml: -## * add a build_depend tag for "message_generation" -## * add a build_depend and a exec_depend tag for each package in MSG_DEP_SET -## * If MSG_DEP_SET isn't empty the following dependency has been pulled in -## but can be declared for certainty nonetheless: -## * add a exec_depend tag for "message_runtime" -## * In this file (CMakeLists.txt): -## * add "message_generation" and every package in MSG_DEP_SET to -## find_package(catkin REQUIRED COMPONENTS ...) -## * add "message_runtime" and every package in MSG_DEP_SET to -## catkin_package(CATKIN_DEPENDS ...) -## * uncomment the add_*_files sections below as needed -## and list every .msg/.srv/.action file to be processed -## * uncomment the generate_messages entry below -## * add every package in MSG_DEP_SET to generate_messages(DEPENDENCIES ...) 
- -## Generate messages in the 'msg' folder -# add_message_files( -# FILES -# Message1.msg -# Message2.msg -# ) - -## Generate services in the 'srv' folder - add_service_files( - FILES - DialogListen.srv - ) - -## Generate actions in the 'action' folder -# add_action_files( -# FILES -# Action1.action -# Action2.action -# ) - -## Generate added messages and services with any dependencies listed here - generate_messages( - DEPENDENCIES - geometry_msgs std_msgs -) - -################################################ -## Declare ROS dynamic reconfigure parameters ## -################################################ - -## To declare and build dynamic reconfigure parameters within this -## package, follow these steps: -## * In the file package.xml: -## * add a build_depend and a exec_depend tag for "dynamic_reconfigure" -## * In this file (CMakeLists.txt): -## * add "dynamic_reconfigure" to -## find_package(catkin REQUIRED COMPONENTS ...) -## * uncomment the "generate_dynamic_reconfigure_options" section below -## and list every .cfg file to be processed - -## Generate dynamic reconfigure parameters in the 'cfg' folder -# generate_dynamic_reconfigure_options( -# cfg/DynReconf1.cfg -# cfg/DynReconf2.cfg -# ) - -################################### -## catkin specific configuration ## -################################### -## The catkin_package macro generates cmake config files for your package -## Declare things to be passed to dependent projects -## INCLUDE_DIRS: uncomment this if your package contains header files -## LIBRARIES: libraries you create in this project that dependent projects also need -## CATKIN_DEPENDS: catkin_packages dependent projects also need -## DEPENDS: system dependencies of this project that dependent projects also need -catkin_package( -# INCLUDE_DIRS include -# LIBRARIES listen_module -# CATKIN_DEPENDS geometry_msgs roscpp rospy std_msgs -# DEPENDS system_lib -) - -########### -## Build ## -########### - -## Specify additional locations of header files -## Your package locations should be listed before other locations -include_directories( -# include - ${catkin_INCLUDE_DIRS} -) - -## Declare a C++ library -# add_library(${PROJECT_NAME} -# src/${PROJECT_NAME}/listen_module.cpp -# ) - -## Add cmake target dependencies of the library -## as an example, code may need to be generated before libraries -## either from message generation or dynamic reconfigure -# add_dependencies(${PROJECT_NAME} ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS}) - -## Declare a C++ executable -## With catkin_make all packages are built within a single CMake context -## The recommended prefix ensures that target names across packages don't collide -# add_executable(${PROJECT_NAME}_node src/listen_module_node.cpp) - -## Rename C++ executable without prefix -## The above recommended prefix causes long target names, the following renames the -## target back to the shorter version for ease of user use -## e.g. 
"rosrun someones_pkg node" instead of "rosrun someones_pkg someones_pkg_node" -# set_target_properties(${PROJECT_NAME}_node PROPERTIES OUTPUT_NAME node PREFIX "") - -## Add cmake target dependencies of the executable -## same as for the library above -# add_dependencies(${PROJECT_NAME}_node ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS}) - -## Specify libraries to link a library or executable target against -# target_link_libraries(${PROJECT_NAME}_node -# ${catkin_LIBRARIES} -# ) - -############# -## Install ## -############# - -# all install targets should use catkin DESTINATION variables -# See http://ros.org/doc/api/catkin/html/adv_user_guide/variables.html - -## Mark executable scripts (Python etc.) for installation -## in contrast to setup.py, you can choose the destination -# catkin_install_python(PROGRAMS -# scripts/my_python_script -# DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} -# ) - -## Mark executables for installation -## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_executables.html -# install(TARGETS ${PROJECT_NAME}_node -# RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} -# ) - -## Mark libraries for installation -## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_libraries.html -# install(TARGETS ${PROJECT_NAME} -# ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} -# LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} -# RUNTIME DESTINATION ${CATKIN_GLOBAL_BIN_DESTINATION} -# ) - -## Mark cpp header files for installation -# install(DIRECTORY include/${PROJECT_NAME}/ -# DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION} -# FILES_MATCHING PATTERN "*.h" -# PATTERN ".svn" EXCLUDE -# ) - -## Mark other files for installation (e.g. launch and bag files, etc.) -# install(FILES -# # myfile1 -# # myfile2 -# DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION} -# ) - -############# -## Testing ## -############# - -## Add gtest based cpp test target and link libraries -# catkin_add_gtest(${PROJECT_NAME}-test test/test_listen_module.cpp) -# if(TARGET ${PROJECT_NAME}-test) -# target_link_libraries(${PROJECT_NAME}-test ${PROJECT_NAME}) -# endif() - -## Add folders to be run by python nosetests -# catkin_add_nosetests(test) diff --git a/legacy/listen_module/README.md b/legacy/listen_module/README.md deleted file mode 100644 index 81c6623e5..000000000 --- a/legacy/listen_module/README.md +++ /dev/null @@ -1,67 +0,0 @@ -# listen_module - -The listen_module package - -This package is maintained by: -- [nicole](mailto:nicole@todo.todo) - -## Prerequisites - -This package depends on the following ROS packages: -- catkin (buildtool) -- geometry_msgs (build) -- roscpp (build) -- rospy (build) -- std_msgs (build) -- geometry_msgs (exec) -- roscpp (exec) -- rospy (exec) -- std_msgs (exec) - -Ask the package maintainer to write or create a blank `doc/PREREQUISITES.md` for their package! - -## Usage - -Ask the package maintainer to write a `doc/USAGE.md` for their package! - -## Example - -Ask the package maintainer to write a `doc/EXAMPLE.md` for their package! - -## Technical Overview - -Ask the package maintainer to write a `doc/TECHNICAL.md` for their package! - -## ROS Definitions - -### Launch Files - -This package has no launch files. - -### Messages - -This package has no messages. 
- -### Services - -#### `DialogListen` - -Request - -| Field | Type | Description | -|:-:|:-:|---| -| action | string | | -| subaction | string | | -| query_text | string | | - -Response - -| Field | Type | Description | -|:-:|:-:|---| -| status | bool | | -| result | string | | - - -### Actions - -This package has no actions. diff --git a/legacy/listen_module/package.xml b/legacy/listen_module/package.xml deleted file mode 100644 index 24111eb56..000000000 --- a/legacy/listen_module/package.xml +++ /dev/null @@ -1,71 +0,0 @@ - - - listen_module - 0.0.0 - The listen_module package - - - - - nicole - - - - - - TODO - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - catkin - geometry_msgs - roscpp - rospy - std_msgs - geometry_msgs - roscpp - rospy - std_msgs - geometry_msgs - roscpp - rospy - std_msgs - - - - - - - - diff --git a/legacy/listen_module/src/listen_module/dialogflow_client.py b/legacy/listen_module/src/listen_module/dialogflow_client.py deleted file mode 100755 index 499d97d03..000000000 --- a/legacy/listen_module/src/listen_module/dialogflow_client.py +++ /dev/null @@ -1,75 +0,0 @@ -#!/usr/bin/env python3 -import rospy, os, rospkg, uuid -from stream_handler import StreamHandler -import sounddevice as sd -from google.cloud import dialogflow_v2 as dialogflow - -# Inspired by the LASR team dialogflow package! Thank you for teaching me so much! - -INPUT_DEVICE = sd.query_hostapis()[0]['default_input_device'] -DEVICE_INFO = sd.query_devices(INPUT_DEVICE, 'input') -RATE = int(DEVICE_INFO["default_samplerate"]) # Sampling rate in Hz -CHANNELS = 1 # Number of channels -BLOCK_SIZE = 4096 # Number of frames per block -FORMAT = "int16" # Data type of audio samples -LANGUAGE_CODE = "en-GB" # Language code of the query - - -class DialogClient: - def __init__(self, project_id, session_id=str(uuid.uuid4())): - self.project_id = project_id - self.project_credentials = os.path.join(rospkg.RosPack().get_path('listen_module'), 'config', - '{}.json'.format(self.project_id)) - self.session_id = session_id - self.session_client = dialogflow.SessionsClient.from_service_account_file(self.project_credentials) - self.session = self.session_client.session_path(self.project_id, self.session_id) - self.session_stream = None - - def detect_intent(self, audio_or_text, query_params=None): - if audio_or_text == "SOUND:PLAYING:PLEASE": - self.session_stream = StreamHandler(self.session_client, self.session) - return self.detect_audio(query_params) - else: - return self.detect_text(audio_or_text, query_params) - - def detect_text(self, text, query_params=None): - text_input = dialogflow.types.TextInput(text=text, language_code=LANGUAGE_CODE) - query_input = dialogflow.types.QueryInput(text=text_input) - request = dialogflow.DetectIntentRequest( - session=self.session, - query_input=query_input, - query_params=query_params - ) - response = self.session_client.detect_intent(request=request) - print(response, response.query_result.fulfillment_text, response.query_result.intent.display_name, - "resp in text") - return response - - - def detect_audio(self, query_params=None): - responses = self.session_stream.get_responses(self.session_stream, query_params) - response = None - for response in responses: - print(response, "response in audio") - if response.recognition_result.message_type == dialogflow.StreamingRecognitionResult.MessageType.END_OF_SINGLE_UTTERANCE: - print("End of single utterance") - self.stop() - if not response.query_result.query_text: - print("No query text") - return response - 
- def stop(self): - self.session_stream.stop() - - -if __name__ == '__main__': - rospy.init_node('dialogflow_client') - try: - client = DialogClient("newagent-groe") - # client.detect_intent("CAn you press button 7") - client.detect_intent("SOUND:PLAYING:PLEASE") - - - except Exception as e: - print(e) - print("Shutting down") diff --git a/legacy/listen_module/src/listen_module/dialogflow_srv.py b/legacy/listen_module/src/listen_module/dialogflow_srv.py deleted file mode 100755 index 4a991f69c..000000000 --- a/legacy/listen_module/src/listen_module/dialogflow_srv.py +++ /dev/null @@ -1,77 +0,0 @@ -#!/usr/bin/env python3 -import rospy -from listen_module.srv import DialogListen, DialogListenRequest, DialogListenResponse -from listen_module.subactions import ButtonAction, NavigationAction -import sounddevice as sd -import os, rospkg, re -from proto.marshal.collections.maps import MapComposite - - -# Audio recording parameters -INPUT_DEVICE = sd.query_hostapis()[0]['default_input_device'] -DEVICE_INFO = sd.query_devices(INPUT_DEVICE, 'input') -RATE = int(DEVICE_INFO["default_samplerate"]) # Sampling rate in Hz -CHANNELS = 1 # Number of channels -BLOCK_SIZE = 4096 # Number of frames per block -FORMAT = "int16" # Data type of audio samples -LANGUAGE_CODE = "en-GB" # Language code of the query - -RECORD_SECONDS = 10 -GOOGLE_APPLICATION_CREDENTIALS = os.path.join(rospkg.RosPack().get_path('listen_module'), 'config', - '{}.json'.format('newagent-groe')) - - -class DialogflowServer: - def __init__(self): - self.handler = { - "BUTTON_PRESSED": { - "id": "newagent-groe", - "action": ButtonAction - - }, - "ROOM_REQUEST": { - "id": "newagent-groe", - "action": NavigationAction - } - } - self.dialog_srv = rospy.Service("/dialogflow_server", DialogListen, self.handle_audio_cb) - self.stream = sd.RawInputStream(samplerate=RATE, blocksize=BLOCK_SIZE, device=INPUT_DEVICE, - dtype=FORMAT, - channels=CHANNELS) - - - - def handle_audio_cb(self, request: DialogListenRequest): - rospy.loginfo("Received request in dialog server") - rospy.loginfo(request.query_text) - resp = DialogListenResponse() - - - action_id = self.handler[request.action]["id"] - action = self.handler[request.action]["action"](action_id=action_id) - result = action.subaction[request.subaction](audio_or_text=request.query_text) # check if is text - print(action, result, "action, subaction, result") - try: - if result: - resp.status = True - if isinstance(result, MapComposite) or isinstance(result, dict): - result = dict(result) - print(result, "result" + ("-" * 100)) - result = str(result["name"]) - print(result, "result" + ("-" * 100)) - if re.search(r'^[-+]?[0-9]*\.?[0-9]+$', str(result)) or result.isdigit(): - result = float(result) - print(result) - resp.result = str(result) - except Exception as e: - rospy.logerr(e) - resp.status = False - resp.result = str("Error in handling audio") - return resp - - -if __name__ == '__main__': - rospy.init_node('dialogflow_server') - server = DialogflowServer() - rospy.loginfo("Dialogflow server is ready to take input") - rospy.spin() diff --git a/legacy/listen_module/src/listen_module/stream_handler.py b/legacy/listen_module/src/listen_module/stream_handler.py deleted file mode 100755 index 69bdf9655..000000000 --- a/legacy/listen_module/src/listen_module/stream_handler.py +++ /dev/null @@ -1,130 +0,0 @@ -#!/usr/bin/env python3 -import sounddevice as sd -from proto.marshal.collections.maps import MapComposite -from google.cloud import dialogflow_v2 as dialogflow -import rospkg, os, rospy, time, uuid - 
-# Define some parameters for the audio stream -INPUT_DEVICE = sd.query_hostapis()[0]['default_input_device'] -print(INPUT_DEVICE) -DEVICE_INFO = sd.query_devices(INPUT_DEVICE, 'input') -RATE = int(DEVICE_INFO["default_samplerate"]) # Sampling rate in Hz, ERRORS might occur as Expressions of alsa -RATE = 22050 -print(RATE) -CHANNELS = 1 # Number of channels -BLOCK_SIZE = 4096 # Number of frames per block -FORMAT = "int16" # Data type of audio samples -LANGUAGE_CODE = "en-GB" # Language code of the query -GOOGLE_APPLICATION_CREDENTIALS = os.path.join(rospkg.RosPack().get_path('listen_module'), 'config', - '{}.json'.format('newagent-groe')) -# GOOGLE_APPLICATION_CREDENTIALS = "/home/nicole/robocup/rexy/src/rexy/listen_module/config/newagent-groe.json" - -class StreamHandler: - def __init__(self, session_client,session): - self.session = session - self.session_client = session_client - self.stop_requested = False - self.query_params = None - - - def stop(self): - self.stop_requested = True - - def audio_generator(self): - start_time = time.time() - rospy.loginfo("Starting stream handler") - with sd.RawInputStream(samplerate=RATE, channels=CHANNELS, dtype=FORMAT) as self.stream: - while self.stream.active: - if self.stop_requested or rospy.is_shutdown() or time.time() - start_time > 10000: - rospy.loginfo("Stopping stream handler") - break - data, overflowed = self.stream.read(BLOCK_SIZE) - if overflowed: - print("Warning: some audio data was lost") - yield bytes(data) - - # Define a generator function that yields Dialogflow requests - def request_generator(self): - # Create the initial request with the session and query input - request = dialogflow.StreamingDetectIntentRequest( - session=self.session, - query_params=self.query_params, - query_input=dialogflow.QueryInput( - audio_config=dialogflow.InputAudioConfig( - audio_encoding=dialogflow.AudioEncoding.AUDIO_ENCODING_LINEAR_16, - sample_rate_hertz=RATE, - language_code=LANGUAGE_CODE, - single_utterance=True, - ), - ), - ) - yield request - - # Loop over the audio blocks from the microphone - for input_audio in self.audio_generator(): - # Create a request with the input audio - request = dialogflow.StreamingDetectIntentRequest(input_audio=input_audio) - yield request - - - def get_responses(self, st, query_params=None): - # send the requests to the dialogflow api and get the responses - try: - self.query_params = query_params - self.stop_requested = False - responses = st.session_client.streaming_detect_intent(st.request_generator()) - self.query_params = None - return responses - except Exception as e: - rospy.loginfo(str(e) + " error in request of stream handler!") - -if __name__ == '__main__': - - rospy.init_node('stream_handler') - project_id = 'newagent-groe' - session_id = str(uuid.uuid4()) - # project_credentials = os.path.join(rospkg.rospack().get_path('listen_module'), 'config', - # '{}.json'.format(project_id)) - project_credentials = "/home/nicole/robocup/rexy/src/rexy/listen_module/config/newagent-groe.json" - session_client = dialogflow.SessionsClient.from_service_account_file(project_credentials) - session = session_client.session_path(project_id, session_id) - st = StreamHandler(session_client, session) - # send the requests to the dialogflow api and get the responses - try: - responses = st.session_client.streaming_detect_intent(st.request_generator()) - print(responses) - - print("=" * 20) - try: - for response in responses: - print( - 'intermediate transcript: "{}".'.format( - response.recognition_result.transcript - ) - ) 
- - # note: the result from the last response is the final transcript along - # with the detected content. - query_result = response.query_result - - print("=" * 20) - print("query text: {}".format(query_result.query_text)) - print( - "detected intent: {} (confidence: {})\n".format( - query_result.intent.display_name, query_result.intent_detection_confidence - ) - ) - print("fulfillment text: {}\n".format(query_result.fulfillment_text)) - # get the parameters - parameters = query_result.parameters - if parameters: - for parameter_name, parameter_value in parameters.items(): - print(f"parameter: {parameter_name} = {parameter_value}") - if isinstance(parameter_value, MapComposite): - parameter_values = dict(parameter_value) - print(parameter_values) - except Exception as e: - print(e, "error in response") - except Exception as e: - rospy.loginfo(str(e) + " error in request of stream handler!") - diff --git a/legacy/listen_module/src/listen_module/subactions/__init__.py b/legacy/listen_module/src/listen_module/subactions/__init__.py deleted file mode 100644 index f75913906..000000000 --- a/legacy/listen_module/src/listen_module/subactions/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from .base import BaseActions -from .navigationAction import NavigationAction -from .button import ButtonAction \ No newline at end of file diff --git a/legacy/listen_module/src/listen_module/subactions/base.py b/legacy/listen_module/src/listen_module/subactions/base.py deleted file mode 100755 index c2b829b60..000000000 --- a/legacy/listen_module/src/listen_module/subactions/base.py +++ /dev/null @@ -1,50 +0,0 @@ -#!/usr/bin/env python3 -from google.cloud import dialogflow_v2 as dialogflow -from listen_module.dialogflow_client import DialogClient -from proto.marshal.collections.maps import MapComposite -import uuid - - -class BaseActions: - def __init__(self, action_id): - self.action_id = action_id - self.client = DialogClient(action_id) - self.session_id = uuid.uuid4() - - def get_context(self, context, lifespan=1): - return dialogflow.types.Context( - name=dialogflow.ContextsClient.context_path(self.action_id, self.session_id, context), - lifespan_count=lifespan) - - def dialog_in_context(self, audio_or_text: str = "SOUND:PLAYING:PLEASE", context: str = None): - query_params = None - if context: - query_params = dialogflow.types.QueryParameters(contexts=[self.get_context(context)]) - return self.client.detect_intent(audio_or_text=audio_or_text, query_params=query_params) - - - def get_parameters(self, response): - response = response.query_result.parameters - resp = [] - # for response in responses: - if response: - res = {} - query_result = response.query_result - query_text = query_result.query_text - parameters = query_result.parameters - for parameter_name, parameter_value in parameters.items(): - if isinstance(parameter_value, MapComposite): - parameter_values = dict(parameter_value) - resp.append((parameter_name, parameter_values)) - else: - resp.append((parameter_name, parameter_value)) - - res["query_text"] = query_text - res["parameters"] = parameters - res["query_result"] = query_result - res["response"] = resp - print(res, "res in audio") - return res - - def stop(self): - self.client.stop() diff --git a/legacy/listen_module/src/listen_module/subactions/button.py b/legacy/listen_module/src/listen_module/subactions/button.py deleted file mode 100755 index 6c479eb67..000000000 --- a/legacy/listen_module/src/listen_module/subactions/button.py +++ /dev/null @@ -1,34 +0,0 @@ -#!/usr/bin/env python3 - 
-import rospy -from listen_module.subactions import BaseActions - -class ButtonAction(BaseActions): - def __init__(self, action_id): - super(ButtonAction, self).__init__(action_id) - - self.subaction = { - "confirm_button": self.confirm_button, - "confirm_floor": self.ask_floor, - } - - def ask_floor(self, audio_or_text): - return self.handle_dialog_in_context(self.dialog_in_context(audio_or_text=audio_or_text, context="ConfirmFloor"), - context="ConfirmFloor") - - def confirm_button(self, audio_or_text): - return self.handle_dialog_in_context( - self.dialog_in_context(audio_or_text=audio_or_text, context="ConfirmButton"), context="ConfirmButton") - - @staticmethod - def handle_dialog_in_context(resp, context): - try: - # confirm button or floor - if context == "ConfirmButton" or context == "ConfirmFloor": - return resp.query_result.parameters["yesno"] - else: - rospy.loginfo("No context found in the navigation action") - raise ValueError - except ValueError: - rospy.loginfo("I am not sure if tthe buttons are pressed and the floor is selected.") - return -1 diff --git a/legacy/listen_module/src/listen_module/subactions/navigationAction.py b/legacy/listen_module/src/listen_module/subactions/navigationAction.py deleted file mode 100755 index 566a06785..000000000 --- a/legacy/listen_module/src/listen_module/subactions/navigationAction.py +++ /dev/null @@ -1,38 +0,0 @@ -#!/usr/bin/env python3 - -import rospy -from listen_module.subactions import BaseActions - - -class NavigationAction(BaseActions): - def __init__(self, action_id): - super(NavigationAction, self).__init__(action_id) - - self.subaction = { - "confirm_location": self.confirm_location, - "ask_location": self.ask_location, - } - - def ask_location(self, audio_or_text): - return self.handle_dialog_in_context(self.dialog_in_context(audio_or_text=audio_or_text, context="AskLocation"), - context="AskLocation") - - def confirm_location(self, audio_or_text): - return self.handle_dialog_in_context( - self.dialog_in_context(audio_or_text=audio_or_text, context="ConfirmLocation"), context="ConfirmLocation") - - @staticmethod - def handle_dialog_in_context(resp, context): - try: - # number, person, Destination - if context == "ConfirmLocation": - return resp.query_result.parameters["yesno"] - elif context == "AskLocation": - print(resp.query_result) - return resp.query_result.parameters["Destination"] or resp.query_result.parameters["number"] or \ - resp.query_result.parameters["person"] - else: - rospy.loginfo("No context found in the navigation action") - raise ValueError - except ValueError: - print("I don't know where you want to go.") diff --git a/legacy/listen_module/srv/DialogListen.srv b/legacy/listen_module/srv/DialogListen.srv deleted file mode 100644 index 8fb8ea81c..000000000 --- a/legacy/listen_module/srv/DialogListen.srv +++ /dev/null @@ -1,7 +0,0 @@ -string action -string subaction -string query_text ---- -bool status -string result - diff --git a/legacy/markers/README.md b/legacy/markers/README.md deleted file mode 100644 index 5fca57db9..000000000 --- a/legacy/markers/README.md +++ /dev/null @@ -1,52 +0,0 @@ -# markers - -The markers package - -This package is maintained by: -- [nicole](mailto:nicole@todo.todo) - -## Prerequisites - -This package depends on the following ROS packages: -- catkin (buildtool) -- geometry_msgs (build) -- message_generation (build) -- roscpp (build) -- rospy (build) -- std_msgs (build) -- geometry_msgs (exec) -- roscpp (exec) -- rospy (exec) -- std_msgs (exec) - -Ask the package maintainer to 
write or create a blank `doc/PREREQUISITES.md` for their package! - -## Usage - -Ask the package maintainer to write a `doc/USAGE.md` for their package! - -## Example - -Ask the package maintainer to write a `doc/EXAMPLE.md` for their package! - -## Technical Overview - -Ask the package maintainer to write a `doc/TECHNICAL.md` for their package! - -## ROS Definitions - -### Launch Files - -This package has no launch files. - -### Messages - -This package has no messages. - -### Services - -This package has no services. - -### Actions - -This package has no actions. diff --git a/legacy/markers/setup.py b/legacy/markers/setup.py deleted file mode 100644 index b3f12485e..000000000 --- a/legacy/markers/setup.py +++ /dev/null @@ -1,12 +0,0 @@ -#!/usr/bin/env python - -from distutils.core import setup -from catkin_pkg.python_setup import generate_distutils_setup - -# fetch values from package.xml -setup_args = generate_distutils_setup( - packages=['markers'], - package_dir={'': 'src'} -) - -setup(**setup_args) \ No newline at end of file diff --git a/legacy/markers/src/markers/markers_helpers.py b/legacy/markers/src/markers/markers_helpers.py deleted file mode 100755 index 8b2c15b44..000000000 --- a/legacy/markers/src/markers/markers_helpers.py +++ /dev/null @@ -1,57 +0,0 @@ -#!/usr/bin/env python3 -import rospy -from visualization_msgs.msg import Marker - -def create_point_marker(x, y, z, idx, g=1.0): - marker_msg = Marker() - marker_msg.header.frame_id = "map" - marker_msg.header.stamp = rospy.Time.now() - marker_msg.id = idx - marker_msg.type = Marker.SPHERE - marker_msg.action = Marker.ADD - marker_msg.pose.position.x = x - marker_msg.pose.position.y = y - marker_msg.pose.position.z = z - marker_msg.pose.orientation.w = 1.0 - marker_msg.scale.x = 0.1 - marker_msg.scale.y = 0.1 - marker_msg.scale.z = 0.1 - marker_msg.color.a = 1.0 - marker_msg.color.r = 0.0 - marker_msg.color.g = g - marker_msg.color.b = 0.0 - return marker_msg - -if __name__ == '__main__': - rospy.init_node("viz") - - wait_pos = rospy.Publisher("/wait_pos_debug", Marker, queue_size=100) - wait_points = rospy.get_param("/wait_position_points") - for i, point in enumerate(wait_points): - print(i, point, "wait") - rospy.sleep(1) - wait_pos.publish(create_point_marker(point[0], point[1], 0, i,g=0.5)) - - - # wait_points_cen = rospy.get_param("/wait_position_center_point") - # wait_pos_cen = rospy.Publisher("/wait_position_center_point_debugger", Marker, queue_size=100) - # for point in wait_points_cen: - # print(point) - # wait_pos_cen.publish(create_point_marker(point[0], point[1], 0, 0)) - - - # lift_pos = rospy.Publisher("/lift_pos_debugger", Marker, queue_size=100) - # lift_pose = rospy.get_param("/lift_position_points") - # for i, point in enumerate(lift_pose): - # rospy.sleep(1) - # lift_pos.publish(create_point_marker(point[0], point[1], 0, i)) - - # lift_center = rospy.get_param("/lift_position_center_point") - # print(lift_center) - # lift_cen = rospy.Publisher("/lift_pos_center_debugger", Marker, queue_size=100) - # # for i, point in enumerate(lift_center): - # # print(i, point,"center lift") - # lift_cen.publish(create_point_marker(lift_center[0], lift_center[1], 0, 100, g=0.1)) - - while not rospy.is_shutdown(): - rospy.spin() diff --git a/legacy/meet_and_greet/README.md b/legacy/meet_and_greet/README.md deleted file mode 100644 index efc8f077d..000000000 --- a/legacy/meet_and_greet/README.md +++ /dev/null @@ -1,67 +0,0 @@ -# meet_and_greet - -The meet_and_greet package - -This package is maintained by: -- 
[nicole](mailto:nicole@todo.todo) - -## Prerequisites - -This package depends on the following ROS packages: -- catkin (buildtool) -- roscpp (build) -- rospy (build) -- std_msgs (build) -- roscpp (exec) -- rospy (exec) -- std_msgs (exec) - -Ask the package maintainer to write or create a blank `doc/PREREQUISITES.md` for their package! - -## Usage - -> How to run - -```python -roslaunch lasr_perception_server perception_server.launch -rosrun meet_and_greet simple_meet_and_greet.py - -``` - -Make sure you have either: - -```python -roslaunch custom_worlds wrs_receptionist.launch -roslaunch usb_cam usb_cam-test.launch -``` - -## Example - -Ask the package maintainer to write a `doc/EXAMPLE.md` for their package! - -## Technical Overview - -> The Meet and greet package files: - -1. Explore suroundings state -> robot wondering around (tbc) -2. Look around state -> robot moving the head and searchign for people, to output known and unknown people (tbc) -3. Meet and greet sm -> teh state machine implementing preemtive states (tbc) -4. SImple meet and greet -> the demo file to represent the recognition - -## ROS Definitions - -### Launch Files - -This package has no launch files. - -### Messages - -This package has no messages. - -### Services - -This package has no services. - -### Actions - -This package has no actions. diff --git a/legacy/meet_and_greet/config/map_points.yaml b/legacy/meet_and_greet/config/map_points.yaml deleted file mode 100644 index 900017367..000000000 --- a/legacy/meet_and_greet/config/map_points.yaml +++ /dev/null @@ -1,44 +0,0 @@ -point1: - position: - x: -0.696476736395 - y: -1.26116825784 - z: 0.0 - orientation: - x: 0.0 - y: 0.0 - z: -0.0435142405385 - w: 0.999052806848 - - -point2: - position: - x: 0.587315805429 - y: -0.886969920382 - z: 0.0 - orientation: - x: 0.0 - y: 0.0 - z: -0.887756799014 - w: 0.460312791268 - -door_simu: - position: - x: 6.133732680056999 - y: -0.7289013393838328 - z: 0.0 - orientation: - x: 0.0 - y: 0.0 - z: 0.028572161371893147 - w: 0.9995917324560756 - -door: - position: - x: -0.8110280734845722 - y: -1.7996335500178748 - z: 0.0 - orientation: - x: 0.0 - y: 0.0 - z: -0.9671729273733352 - w: 0.2541191227674403 \ No newline at end of file diff --git a/legacy/meet_and_greet/doc/TECHNICAL.md b/legacy/meet_and_greet/doc/TECHNICAL.md deleted file mode 100644 index 566d892e0..000000000 --- a/legacy/meet_and_greet/doc/TECHNICAL.md +++ /dev/null @@ -1,6 +0,0 @@ -> The Meet and greet package files: - -1. Explore suroundings state -> robot wondering around (tbc) -2. Look around state -> robot moving the head and searchign for people, to output known and unknown people (tbc) -3. Meet and greet sm -> teh state machine implementing preemtive states (tbc) -4. 
SImple meet and greet -> the demo file to represent the recognition diff --git a/legacy/meet_and_greet/doc/USAGE.md b/legacy/meet_and_greet/doc/USAGE.md deleted file mode 100644 index 157aa24cf..000000000 --- a/legacy/meet_and_greet/doc/USAGE.md +++ /dev/null @@ -1,15 +0,0 @@ -> How to run - -```python -# NOTE: lasr_perception_server package has been removed -roslaunch lasr_perception_server perception_server.launch -rosrun meet_and_greet simple_meet_and_greet.py - -``` - -Make sure you have either: - -```python -roslaunch custom_worlds wrs_receptionist.launch -roslaunch usb_cam usb_cam-test.launch -``` diff --git a/legacy/meet_and_greet/package.xml.excluded b/legacy/meet_and_greet/package.xml.excluded deleted file mode 100644 index 3952e47fc..000000000 --- a/legacy/meet_and_greet/package.xml.excluded +++ /dev/null @@ -1,68 +0,0 @@ - - - meet_and_greet - 0.0.0 - The meet_and_greet package - - - - - nicole - - - - - - TODO - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - catkin - roscpp - rospy - std_msgs - roscpp - rospy - std_msgs - roscpp - rospy - std_msgs - - - - - - - - diff --git a/legacy/meet_and_greet/setup.py b/legacy/meet_and_greet/setup.py deleted file mode 100644 index 70e89d64d..000000000 --- a/legacy/meet_and_greet/setup.py +++ /dev/null @@ -1,11 +0,0 @@ -#!/usr/bin/env python3 - -from distutils.core import setup -from catkin_pkg.python_setup import generate_distutils_setup - -setup_args = generate_distutils_setup( - packages=['meet_and_greet'], - package_dir={'': 'src'} -) - -setup(**setup_args) \ No newline at end of file diff --git a/legacy/meet_and_greet/src/meet_and_greet/__init__.py b/legacy/meet_and_greet/src/meet_and_greet/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/legacy/meet_and_greet/src/meet_and_greet/explore_surroundings_state.py b/legacy/meet_and_greet/src/meet_and_greet/explore_surroundings_state.py deleted file mode 100755 index 63233f7ba..000000000 --- a/legacy/meet_and_greet/src/meet_and_greet/explore_surroundings_state.py +++ /dev/null @@ -1,51 +0,0 @@ -#!/usr/bin/env python3 -# coding=utf-8 -import smach -import rospy -import random -from tiago_controllers.helpers import get_pose_from_param, is_running -from tiago_controllers.controllers import Controllers - -class ExploreSurroundingsState(smach.State): - ''' - Wonders around and detects people - ''' - - def __init__(self, controllers): - smach.State.__init__(self, - outcomes=['finished_exploring'] - ) - - self.controllers = controllers - self.map_points = ['/point1', '/point2'] # pos on map - - def execute(self, userdata): - # wonder between points - index = 0 - while not rospy.is_shutdown(): - if self.preempt_requested(): - # cancel the current base goal - # \TODO: check if there is a process - self.controllers.base_controller.cancel() - rospy.logwarn(' Preempting the Explore Surroundings state') - self.service_preempt() - return 'finished_exploring' - if not is_running(self.controllers.base_controller.get_client()): - print('i am here', index) - self.controllers.base_controller.async_to_pose(get_pose_from_param(self.map_points[index])) - if index == 1: - index = 0 - else: - index = 1 - - -if __name__ == '__main__': - rospy.init_node('smach_example_state_machine') - con = Controllers() - sm = smach.StateMachine(outcomes=['finished_exploring']) - with sm: - smach.StateMachine.add('EXPLORE_SURROUNDINGS', ExploreSurroundingsState(con), - transitions={'finished_exploring':'finished_exploring'},) - outcome = sm.execute() - rospy.loginfo('I have completed 
execution with outcome: ') - rospy.loginfo(outcome) diff --git a/legacy/meet_and_greet/src/meet_and_greet/look_around_state.py b/legacy/meet_and_greet/src/meet_and_greet/look_around_state.py deleted file mode 100755 index bebed07f8..000000000 --- a/legacy/meet_and_greet/src/meet_and_greet/look_around_state.py +++ /dev/null @@ -1,63 +0,0 @@ -#!/usr/bin/env python3 -import os -import smach -import rospy, rospkg -from sensor_msgs.msg import Image -from cv_bridge3 import CvBridge, cv2 -from lasr_perception_server.srv import DetectImage -from std_msgs.msg import Empty - -class LookAroundState(smach.State): - ''' - Looks aroudn and tries to detect people - ''' - - def __init__(self, controllers): - smach.State.__init__(self, - outcomes=['finished_without_info', 'finished_with_known_ppl', 'finished_with_unknown_ppl']) - - self.controllers = controllers - if self.controllers.head_controller: - self.rotate_head = self.controllers.head_controller.sync_reach_to - else: - self.rotate_head = lambda *args, **kwargs : None - - self.pub = rospy.Publisher('/sm_reset', Empty, queue_size=1) - def find_faces(self): - print('in find faces') - imgs = [] - rate = rospy.Rate(3) - for i in range(2): - imgs.append(rospy.wait_for_message("/xtion/rgb/image_raw", Image)) - rate.sleep() - - det = rospy.ServiceProxy("lasr_perception_server/detect_objects_image", DetectImage) - resp = det(imgs, "coco", 0.7, 0.3, ["person"],'known_people').detected_objects - return resp - - def execute(self, userdata): - direction = 0.2 - for i in range(2): - direction *= -1 - self.rotate_head(direction, -0.2, velocities=[0.1, 0.0]) - rospy.sleep(15) - if True: - # if self.find_faces(): - print(self.find_faces()) - self.pub.publish(Empty()) - return 'finished_with_known_ppl' - # elif len(self.find_faces()) > 2: - # return 'finished_with_unknown_ppl' - - # return 'finished_without_info' - - -# if __name__ == '__main__': -# rospy.init_node('look_around_state') -# sm = smach.StateMachine(outcomes=['success']) -# with sm: -# smach.StateMachine.add('LOOK_AROUND_STATE', LookAroundState(), -# transitions={'found_people':'finished_looking_around'}) -# outcome = sm.execute() -# rospy.loginfo('I have completed execution with outcome: ') -# rospy.loginfo(outcome) diff --git a/legacy/meet_and_greet/src/meet_and_greet/meet_and_greet_sm.py b/legacy/meet_and_greet/src/meet_and_greet/meet_and_greet_sm.py deleted file mode 100755 index 76661cb08..000000000 --- a/legacy/meet_and_greet/src/meet_and_greet/meet_and_greet_sm.py +++ /dev/null @@ -1,63 +0,0 @@ -#!/usr/bin/env python3 -# coding=utf-8 -import rospy -import smach -import sys -from smach import Concurrence -from smach_ros import IntrospectionServer -from explore_surroundings_state import ExploreSurroundingsState -from look_around_state import LookAroundState -from tiago_controllers.controllers import Controllers -import smach_ros -from std_msgs.msg import Empty - - -def out_cb(outcome_map): - if outcome_map['LOOK_AROUND'] == 'finished_with_known_ppl': - return 'finished_wandering' - if outcome_map['LOOK_AROUND'] == 'finished_with_unknown_ppl': - return 'finished_wandering' - else: - return 'wander_around' - -def child_term_cb(outcome_map): - if outcome_map['LOOK_AROUND'] == 'finished_with_known_ppl': - return True - elif outcome_map['LOOK_AROUND'] == 'finished_with_unknown_ppl': - return True - elif outcome_map['MONITORING'] == 'invalid': - return True - else: - return False -def monitoring_cb(ud, msg): - return False -class MeetAndGreetSM(smach.StateMachine): - def __init__(self): - 
smach.StateMachine.__init__(self, outcomes=['finished_meet_and_greet', 'aborted']) - self.controllers = Controllers() - - with self: - cc = Concurrence(outcomes=['finished_wandering','wander_around'], - default_outcome='wander_around', - output_keys=['detection_map'], - outcome_cb=out_cb, - child_termination_cb=child_term_cb - ) - with cc: - smach.Concurrence.add('EXPLORE_SURROUNDINGS', ExploreSurroundingsState(self.controllers)) - smach.Concurrence.add('LOOK_AROUND', LookAroundState(self.controllers)) - smach.Concurrence.add('MONITORING', smach_ros.MonitorState("/sm_reset", Empty, monitoring_cb)) - - smach.StateMachine.add('WANDERING', cc, - transitions={'finished_wandering': 'finished_meet_and_greet', 'wander_around':'WANDERING'}) - - - -if __name__ == "__main__": - rospy.init_node("meet_and_greet", sys.argv, anonymous=True) - sm = MeetAndGreetSM() - sis = IntrospectionServer('iserver', sm, 'SM_ROOT') - sis.start() - outcome = sm.execute() - sis.stop() - rospy.loginfo(f'I have completed execution with outcome {outcome}') diff --git a/legacy/meet_and_greet/src/meet_and_greet/simple_meet_and_greet.py b/legacy/meet_and_greet/src/meet_and_greet/simple_meet_and_greet.py deleted file mode 100755 index 3e20920d2..000000000 --- a/legacy/meet_and_greet/src/meet_and_greet/simple_meet_and_greet.py +++ /dev/null @@ -1,85 +0,0 @@ -#!/usr/bin/env python3 -import rospy -import os -import rospkg -from lasr_voice.voice import Voice -from tiago_controllers.helpers import get_pose_from_param, is_running -from sensor_msgs.msg import Image -from cv_bridge3 import CvBridge, cv2 -from lasr_perception_server.srv import DetectImage -from tiago_controllers.controllers import Controllers - -IMAGES = 1 - -class SimpleMeetGreet: - def __init__(self): - self.controllers = Controllers() - - if rospy.get_published_topics(namespace='/xtion'): - print('in xtion') - self.topic = '/xtion/rgb/image_raw' - else: - print('in usb') - self.topic = '/usb_cam/image_raw' - - - self.voice = Voice() - self.map_points = ['/point1', '/point2'] # pos on map - - def find_person(self): - imgs = [] - rate = rospy.Rate(3) - for i in range(IMAGES): - im = rospy.wait_for_message('/usb_cam/image_raw', Image) - imgs.append(im) - # imgs.append(rospy.wait_for_message(self.topic, Image)) - rate.sleep() - - det = rospy.ServiceProxy("lasr_perception_server/detect_objects_image", DetectImage) - resp = det(imgs, "coco", 0.7, 0.3, ["person"], 'known_people').detected_objects - print(resp) - return resp - - def handle_findings(self, detections): - names = [] - counter = 0 - if len(detections)> 0: - for human in detections: - print('human name', human.name) - if human.name == 'person': - counter = counter + 1 - else: - self.voice.sync_tts("Hi" +str(human.name)) - - print('hi, ', human.name) - if counter > 0: - # self.voice.sync_tts(' there are new people. I have not met ' + str(counter) + 'people') - if counter == 1: - # self.voice.sync_tts("there are new people. I have not met you before") - print(' there are new people. I have not met you before') - else: - # self.voice.sync_tts(' there are new people. I have not met ' + str(counter) + 'people') - print(' there are new people. 
I have not met ' + str(counter) + 'people') - - - - def main(self): - # pos = get_pose_from_param('/door') - # self.controllers.base_controller.sync_to_pose(pos) - for i in range(4): - resp = self.find_person() - if len(resp) > 0: - self.handle_findings(resp) - return - - # self.voice.sync_tts("Hi, i don't know anyone") - print("Hi, i don't know anyone") - print('I dont known anyone') - - - - -if __name__ == '__main__': - rospy.init_node("simple_meet_and_greet", anonymous=True) - test = SimpleMeetGreet() - test.main() diff --git a/legacy/lasr_object_detection_yolo/src/lasr_object_detection_yolo/darknet_pytorch/__init__.py b/legacy/narrow_space_navigation/CATKIN_IGNORE similarity index 100% rename from legacy/lasr_object_detection_yolo/src/lasr_object_detection_yolo/darknet_pytorch/__init__.py rename to legacy/narrow_space_navigation/CATKIN_IGNORE diff --git a/legacy/object_interest_tracking/.gitignore b/legacy/object_interest_tracking/.gitignore deleted file mode 100644 index dcd4e925d..000000000 --- a/legacy/object_interest_tracking/.gitignore +++ /dev/null @@ -1,272 +0,0 @@ -# Created by https://www.toptal.com/developers/gitignore/api/ros,python,venv,visualstudiocode,linux -# Edit at https://www.toptal.com/developers/gitignore?templates=ros,python,venv,visualstudiocode,linux - -### Linux ### -*~ - -# temporary files which can be created if a process still has a handle open of a deleted file -.fuse_hidden* - -# KDE directory preferences -.directory - -# Linux trash folder which might appear on any partition or disk -.Trash-* - -# .nfs files are created when an open file is removed but is still being accessed -.nfs* - -### Python ### -# Byte-compiled / optimized / DLL files -__pycache__/ -*.py[cod] -*$py.class - -# C extensions -*.so - -# Distribution / packaging -.Python -build/ -develop-eggs/ -dist/ -downloads/ -eggs/ -.eggs/ -lib/ -lib64/ -parts/ -sdist/ -var/ -wheels/ -share/python-wheels/ -*.egg-info/ -.installed.cfg -*.egg -MANIFEST - -# PyInstaller -# Usually these files are written by a python script from a template -# before PyInstaller builds the exe, so as to inject date/other infos into it. -*.manifest -*.spec - -# Installer logs -pip-log.txt -pip-delete-this-directory.txt - -# Unit test / coverage reports -htmlcov/ -.tox/ -.nox/ -.coverage -.coverage.* -.cache -nosetests.xml -coverage.xml -*.cover -*.py,cover -.hypothesis/ -.pytest_cache/ -cover/ - -# Translations -*.mo -*.pot - -# Django stuff: -*.log -local_settings.py -db.sqlite3 -db.sqlite3-journal - -# Flask stuff: -instance/ -.webassets-cache - -# Scrapy stuff: -.scrapy - -# Sphinx documentation -docs/_build/ - -# PyBuilder -.pybuilder/ -target/ - -# Jupyter Notebook -.ipynb_checkpoints - -# IPython -profile_default/ -ipython_config.py - -# pyenv -# For a library or package, you might want to ignore these files since the code is -# intended to run in multiple environments; otherwise, check them in: -# .python-version - -# pipenv -# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. -# However, in case of collaboration, if having platform-specific dependencies or dependencies -# having no cross-platform support, pipenv may install dependencies that don't work, or not -# install all needed dependencies. -#Pipfile.lock - -# poetry -# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. -# This is especially recommended for binary packages to ensure reproducibility, and is more -# commonly ignored for libraries. 
-# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control -#poetry.lock - -# pdm -# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. -#pdm.lock -# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it -# in version control. -# https://pdm.fming.dev/#use-with-ide -.pdm.toml - -# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm -__pypackages__/ - -# Celery stuff -celerybeat-schedule -celerybeat.pid - -# SageMath parsed files -*.sage.py - -# Environments -.env -.venv -env/ -venv/ -ENV/ -env.bak/ -venv.bak/ - -# Spyder project settings -.spyderproject -.spyproject - -# Rope project settings -.ropeproject - -# mkdocs documentation -/site - -# mypy -.mypy_cache/ -.dmypy.json -dmypy.json - -# Pyre type checker -.pyre/ - -# pytype static type analyzer -.pytype/ - -# Cython debug symbols -cython_debug/ - -# PyCharm -# JetBrains specific template is maintained in a separate JetBrains.gitignore that can -# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore -# and can be added to the global gitignore or merged into this file. For a more nuclear -# option (not recommended) you can uncomment the following to ignore the entire idea folder. -#.idea/ - -### Python Patch ### -# Poetry local configuration file - https://python-poetry.org/docs/configuration/#local-configuration -poetry.toml - -# ruff -.ruff_cache/ - -# LSP config files -pyrightconfig.json - -### ROS ### -devel/ -logs/ -bin/ -msg_gen/ -srv_gen/ -msg/*Action.msg -msg/*ActionFeedback.msg -msg/*ActionGoal.msg -msg/*ActionResult.msg -msg/*Feedback.msg -msg/*Goal.msg -msg/*Result.msg -msg/_*.py -build_isolated/ -devel_isolated/ - -# Generated by dynamic reconfigure -*.cfgc -/cfg/cpp/ -/cfg/*.py - -# Ignore generated docs -*.dox -*.wikidoc - -# eclipse stuff -.project -.cproject - -# qcreator stuff -CMakeLists.txt.user - -srv/_*.py -*.pcd -*.pyc -qtcreator-* -*.user - -/planning/cfg -/planning/docs -/planning/src - - -# Emacs -.#* - -# Catkin custom files -CATKIN_IGNORE - -### venv ### -# Virtualenv -# http://iamzed.com/2009/05/07/a-primer-on-virtualenv/ -[Bb]in -[Ii]nclude -[Ll]ib -[Ll]ib64 -[Ll]ocal -[Ss]cripts -pyvenv.cfg -pip-selfcheck.json - -### VisualStudioCode ### -.vscode/* -!.vscode/settings.json -!.vscode/tasks.json -!.vscode/launch.json -!.vscode/extensions.json -!.vscode/*.code-snippets - -# Local History for Visual Studio Code -.history/ - -# Built Visual Studio Code Extensions -*.vsix - -### VisualStudioCode Patch ### -# Ignore all local history of files -.history -.ionide - -# End of https://www.toptal.com/developers/gitignore/api/ros,python,venv,visualstudiocode,linux diff --git a/legacy/object_interest_tracking/CMakeLists.txt b/legacy/object_interest_tracking/CMakeLists.txt deleted file mode 100644 index b2af86842..000000000 --- a/legacy/object_interest_tracking/CMakeLists.txt +++ /dev/null @@ -1,210 +0,0 @@ -cmake_minimum_required(VERSION 3.0.2) -project(object_interest_tracking) - -## Compile as C++11, supported in ROS Kinetic and newer -# add_compile_options(-std=c++11) - -## Find catkin macros and libraries -## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz) -## is used, also find other catkin packages -find_package(catkin REQUIRED COMPONENTS - geometry_msgs - message_generation - message_runtime - rospy - sensor_msgs - std_msgs -) - -## System dependencies are found with CMake's conventions -# find_package(Boost REQUIRED 
COMPONENTS system) - - -## Uncomment this if the package has a setup.py. This macro ensures -## modules and global scripts declared therein get installed -## See http://ros.org/doc/api/catkin/html/user_guide/setup_dot_py.html -# catkin_python_setup() - -################################################ -## Declare ROS messages, services and actions ## -################################################ - -## To declare and build messages, services or actions from within this -## package, follow these steps: -## * Let MSG_DEP_SET be the set of packages whose message types you use in -## your messages/services/actions (e.g. std_msgs, actionlib_msgs, ...). -## * In the file package.xml: -## * add a build_depend tag for "message_generation" -## * add a build_depend and a exec_depend tag for each package in MSG_DEP_SET -## * If MSG_DEP_SET isn't empty the following dependency has been pulled in -## but can be declared for certainty nonetheless: -## * add a exec_depend tag for "message_runtime" -## * In this file (CMakeLists.txt): -## * add "message_generation" and every package in MSG_DEP_SET to -## find_package(catkin REQUIRED COMPONENTS ...) -## * add "message_runtime" and every package in MSG_DEP_SET to -## catkin_package(CATKIN_DEPENDS ...) -## * uncomment the add_*_files sections below as needed -## and list every .msg/.srv/.action file to be processed -## * uncomment the generate_messages entry below -## * add every package in MSG_DEP_SET to generate_messages(DEPENDENCIES ...) - -## Generate messages in the 'msg' folder -# add_message_files( -# FILES -# Message1.msg -# Message2.msg -# ) - -## Generate services in the 'srv' folder -add_service_files( - FILES - Tdr.srv -) - -## Generate actions in the 'action' folder -# add_action_files( -# FILES -# Action1.action -# Action2.action -# ) - -## Generate added messages and services with any dependencies listed here -generate_messages( - DEPENDENCIES - std_msgs - geometry_msgs - sensor_msgs -) - -################################################ -## Declare ROS dynamic reconfigure parameters ## -################################################ - -## To declare and build dynamic reconfigure parameters within this -## package, follow these steps: -## * In the file package.xml: -## * add a build_depend and a exec_depend tag for "dynamic_reconfigure" -## * In this file (CMakeLists.txt): -## * add "dynamic_reconfigure" to -## find_package(catkin REQUIRED COMPONENTS ...) 
-## * uncomment the "generate_dynamic_reconfigure_options" section below -## and list every .cfg file to be processed - -## Generate dynamic reconfigure parameters in the 'cfg' folder -# generate_dynamic_reconfigure_options( -# cfg/DynReconf1.cfg -# cfg/DynReconf2.cfg -# ) - -################################### -## catkin specific configuration ## -################################### -## The catkin_package macro generates cmake config files for your package -## Declare things to be passed to dependent projects -## INCLUDE_DIRS: uncomment this if your package contains header files -## LIBRARIES: libraries you create in this project that dependent projects also need -## CATKIN_DEPENDS: catkin_packages dependent projects also need -## DEPENDS: system dependencies of this project that dependent projects also need -catkin_package( -# INCLUDE_DIRS include -# LIBRARIES engagementScore - CATKIN_DEPENDS geometry_msgs message_generation message_runtime rospy sensor_msgs std_msgs -# DEPENDS system_lib -) - -########### -## Build ## -########### - -## Specify additional locations of header files -## Your package locations should be listed before other locations -include_directories( -# include - ${catkin_INCLUDE_DIRS} -) - -## Declare a C++ library -# add_library(${PROJECT_NAME} -# src/${PROJECT_NAME}/engagementScore.cpp -# ) - -## Add cmake target dependencies of the library -## as an example, code may need to be generated before libraries -## either from message generation or dynamic reconfigure -# add_dependencies(${PROJECT_NAME} ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS}) - -## Declare a C++ executable -## With catkin_make all packages are built within a single CMake context -## The recommended prefix ensures that target names across packages don't collide -# add_executable(${PROJECT_NAME}_node src/engagementScore_node.cpp) - -## Rename C++ executable without prefix -## The above recommended prefix causes long target names, the following renames the -## target back to the shorter version for ease of user use -## e.g. "rosrun someones_pkg node" instead of "rosrun someones_pkg someones_pkg_node" -# set_target_properties(${PROJECT_NAME}_node PROPERTIES OUTPUT_NAME node PREFIX "") - -## Add cmake target dependencies of the executable -## same as for the library above -# add_dependencies(${PROJECT_NAME}_node ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS}) - -## Specify libraries to link a library or executable target against -# target_link_libraries(${PROJECT_NAME}_node -# ${catkin_LIBRARIES} -# ) - -############# -## Install ## -############# - -# all install targets should use catkin DESTINATION variables -# See http://ros.org/doc/api/catkin/html/adv_user_guide/variables.html - -## Mark executable scripts (Python etc.) 
for installation -## in contrast to setup.py, you can choose the destination -# catkin_install_python(PROGRAMS -# scripts/my_python_script -# DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} -# ) - -## Mark executables for installation -## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_executables.html -# install(TARGETS ${PROJECT_NAME}_node -# RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} -# ) - -## Mark libraries for installation -## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_libraries.html -# install(TARGETS ${PROJECT_NAME} -# ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} -# LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} -# RUNTIME DESTINATION ${CATKIN_GLOBAL_BIN_DESTINATION} -# ) - -## Mark cpp header files for installation -# install(DIRECTORY include/${PROJECT_NAME}/ -# DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION} -# FILES_MATCHING PATTERN "*.h" -# PATTERN ".svn" EXCLUDE -# ) - -## Mark other files for installation (e.g. launch and bag files, etc.) -# install(FILES -# # myfile1 -# # myfile2 -# DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION} -# ) - -############# -## Testing ## -############# - -## Add gtest based cpp test target and link libraries -# catkin_add_gtest(${PROJECT_NAME}-test test/test_engagementScore.cpp) -# if(TARGET ${PROJECT_NAME}-test) -# target_link_libraries(${PROJECT_NAME}-test ${PROJECT_NAME}) -# endif() - -## Add folders to be run by python nosetests -# catkin_add_nosetests(test) diff --git a/legacy/object_interest_tracking/README.MD b/legacy/object_interest_tracking/README.MD deleted file mode 100644 index d63ea8343..000000000 --- a/legacy/object_interest_tracking/README.MD +++ /dev/null @@ -1,66 +0,0 @@ -# object_interest_tracking - -The object_interest_tracking package - -This package is maintained by: -- [yousef](mailto:yousef@todo.todo) - -## Prerequisites - -This package depends on the following ROS packages: -- catkin (buildtool) -- geometry_msgs (build) -- message_generation (build) -- rospy (build) -- sensor_msgs (build) -- std_msgs (build) -- message_runtime (build) -- geometry_msgs (exec) -- message_runtime (exec) -- rospy (exec) -- sensor_msgs (exec) -- std_msgs (exec) -- message_generation (exec) - -Ask the package maintainer to write or create a blank `doc/PREREQUISITES.md` for their package! - -## Usage - -Ask the package maintainer to write a `doc/USAGE.md` for their package! - -## Example - -Ask the package maintainer to write a `doc/EXAMPLE.md` for their package! - -## Technical Overview - -Ask the package maintainer to write a `doc/TECHNICAL.md` for their package! - -## ROS Definitions - -### Launch Files - -This package has no launch files. - -### Messages - -This package has no messages. - -### Services - -#### `Tdr` - -Request - -| Field | Type | Description | -|:-:|:-:|---| - -Response - -| Field | Type | Description | -|:-:|:-:|---| - - -### Actions - -This package has no actions. 
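For reference, the `Tdr` service documented in the removed README above was registered by the deleted `detect_interest_srv.py` (shown just below) under the service name `v2`, with an empty request and response. A minimal client sketch, purely to record how that legacy interface was invoked; it assumes the removed `object_interest_tracking` package were still on the ROS package path and its node still running:

```python
#!/usr/bin/env python3
# Client sketch for the removed `v2` service (type Tdr, empty request/response).
# Kept only to document the deleted interface; the service no longer exists.
import rospy
from object_interest_tracking.srv import Tdr  # provided by the (now deleted) package

if __name__ == "__main__":
    rospy.init_node("tdr_client_example")
    rospy.wait_for_service("v2")
    trigger = rospy.ServiceProxy("v2", Tdr)
    trigger()  # Tdr has no request fields; the removed node used the call to face the tracked person
    rospy.loginfo("v2 service call returned")
```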
diff --git a/legacy/object_interest_tracking/models/face_landmarker.task b/legacy/object_interest_tracking/models/face_landmarker.task deleted file mode 100644 index c50c845d1..000000000 Binary files a/legacy/object_interest_tracking/models/face_landmarker.task and /dev/null differ diff --git a/legacy/object_interest_tracking/package.xml b/legacy/object_interest_tracking/package.xml deleted file mode 100644 index a50f2ad43..000000000 --- a/legacy/object_interest_tracking/package.xml +++ /dev/null @@ -1,75 +0,0 @@ - - - object_interest_tracking - 0.0.0 - The object_interest_tracking package - - - - - yousef - - - - - - TODO - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - catkin - geometry_msgs - message_generation - rospy - sensor_msgs - std_msgs - message_runtime - geometry_msgs - rospy - sensor_msgs - std_msgs - geometry_msgs - message_runtime - rospy - sensor_msgs - std_msgs - message_generation - - - - - - - - diff --git a/legacy/object_interest_tracking/setup.py b/legacy/object_interest_tracking/setup.py deleted file mode 100644 index 06ad1322b..000000000 --- a/legacy/object_interest_tracking/setup.py +++ /dev/null @@ -1,11 +0,0 @@ -#!/usr/bin/env python3 - -from distutils.core import setup -from catkin_pkg.python_setup import generate_distutils_setup - -setup_args = generate_distutils_setup( - packages=['object_interest_tracking'], - package_dir={'': 'src'} -) - -setup(**setup_args) \ No newline at end of file diff --git a/legacy/object_interest_tracking/src/__init__.py b/legacy/object_interest_tracking/src/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/legacy/object_interest_tracking/src/detect_interest_srv.py b/legacy/object_interest_tracking/src/detect_interest_srv.py deleted file mode 100755 index af2f7e259..000000000 --- a/legacy/object_interest_tracking/src/detect_interest_srv.py +++ /dev/null @@ -1,162 +0,0 @@ -#!/usr/bin/env python3 -import rospy -import numpy as np -import cv2 -from sensor_msgs.msg import Image, PointCloud2 -from object_interest_tracking.srv import Tdr, TdrResponse -from cv_bridge3 import CvBridge -from lasr_vision_msgs.srv import YoloDetection -from geometry_msgs.msg import Point, PointStamped -import ros_numpy as rnp -from std_msgs.msg import String -from tf_module.srv import TfTransform, TfTransformRequest -from tiago_controllers import BaseController -from math import acos -from lasr_shapely import LasrShapely - -from tiago_controllers.helpers.pose_helpers import get_pose_from_param - -from lasr_object_detection_yolo.detect_objects_v8 import detect_objects, perform_detection, debug - -def estimate_pose(pcl_msg, cv_im, detection): - tf = rospy.ServiceProxy("/tf_transform", TfTransform) - contours = np.array(detection).reshape(-1, 2) - mask = np.zeros((cv_im.shape[0], cv_im.shape[1]), np.uint8) - cv2.fillPoly(mask, pts=[contours], color=(255, 255, 255)) - indices = np.argwhere(mask) - if indices.shape[0] == 0: - return np.array([np.inf, np.inf, np.inf]) - pcl_xyz = rnp.point_cloud2.pointcloud2_to_xyz_array(pcl_msg, remove_nans=False) - - xyz_points = [] - for x, y in indices: - x, y, z = pcl_xyz[x][y] - xyz_points.append([x, y, z]) - - x, y, z = np.nanmean(xyz_points, axis=0) - centroid = PointStamped() - centroid.point = Point(x, y, z) - centroid.header = pcl_msg.header - tf_req = TfTransformRequest() - tf_req.target_frame = String("map") - tf_req.point = centroid - tf_req.source_frame = String(pcl_msg.header.frame_id) - response = tf(tf_req) - return response.target_point.point - - -def toNPArray(a): - 
return np.array([a.x, a.y]) - - -def orderIndices(current, previous): - if len(current) == 1 and len(previous) == 1: - return [0] - - indices = [] - if len(current) == len(previous): - for ma in range(len(current)): - diffs = list(map(lambda lo: np.linalg.norm(toNPArray(current[ma]) - toNPArray(lo)), previous)) - indices.append(diffs.index(min(diffs))) - - return indices - - -def getPoseDiff(lastRecorded, x, y): - distances = [] - for i in lastRecorded: - distances.append(np.linalg.norm(toNPArray(i) - np.array([x, y]))) - return distances - - - - -def v2(req): - cvBridge = CvBridge() - shapley = LasrShapely() - rospy.wait_for_service('/yolov8/detect') - corners = rospy.get_param("/corners_arena") - print(corners, "corners") - - objectRecognitionService = rospy.ServiceProxy('/yolov8/detect', YoloDetection) - basePose = get_pose_from_param("/phase3_lift/pose") - print(basePose, "base pose") - - # BaseController().sync_to_pose(basePose) - - headPoint = None - found = False - ANGLE_THRESHOLD = 10 - REACHING_THRESHOLDS = 2 - - while not found: - rospy.sleep(0.5) - locs = [] - for i in range(2): - img_msg = rospy.wait_for_message('/xtion/rgb/image_raw', Image) - img_depth_msg = rospy.wait_for_message('/xtion/depth_registered/points', PointCloud2) - - - currentLocs = list(filter(lambda i: i.name == "person", - objectRecognitionService(img_msg, 'yolov8n-seg.pt', 0.7, 0.3).detected_objects)) - - cm = [] - rospy.logwarn("0") - - for i2 in range(len(currentLocs)): - # Assume 2 ppl! - pose = estimate_pose(img_depth_msg, cvBridge.imgmsg_to_cv2_np(img_msg), currentLocs[i2].xyseg) - if (shapley.is_point_in_polygon_2d(corners, pose.x, pose.y)): - cm.append(pose) - - locs.append(cm) - rospy.logwarn(len(locs[0])) - - print("vecs11") - - if i == 1 and len(locs[0]) > 0 and len(locs[0]) == len(locs[1]): - # swap if needed - newIndices = orderIndices(cm, locs[0]) - - if (newIndices == [0]): - locs.append(cm) - else: - locs.append([cm[i] for i in newIndices]) - - # CALC VECS - vecs = [] - for i in range(len(locs[1])): - rospy.logerr(locs[1][i]) - a2 = toNPArray(locs[1][i]) - toNPArray(locs[0][i]) - vecs.append(a2) - print("vecs") - - print(vecs) - - # CALC ANGLES - [x, y, q] = BaseController().get_current_pose() - - angles = [] - print(vecs) - for i1 in len(vecs): - angles.append(acos((vecs[i1] - np.array([x, y]))) / ( - np.linalg.norm(vecs[i1]) * np.linalg.norm(np.array([x, y])))) - - distances = getPoseDiff(locs[1], x, y) - rospy.logwarn("dis ANG") - rospy.logwarn(min(angles)) - rospy.logwarn(distances[angles.index(min(angles))]) - - # FACE PERSON - if min(angles) < ANGLE_THRESHOLD and distances[angles.index(min(angles))] < REACHING_THRESHOLDS: - headPoint = locs[angles.index(min(angles))] - found = True - - print(headPoint) - BaseController().sync_face_to(headPoint.x, headPoint.y) - return TdrResponse() - - -rospy.init_node("objectEngagementTracking") -rospy.Service('v2', Tdr, v2) -rospy.spin() \ No newline at end of file diff --git a/legacy/object_interest_tracking/src/settings.py b/legacy/object_interest_tracking/src/settings.py deleted file mode 100644 index 863e8208f..000000000 --- a/legacy/object_interest_tracking/src/settings.py +++ /dev/null @@ -1,3 +0,0 @@ -MEDIAPIPE_MODEL_PATH = "./src/engagmentScore/models/face_landmarker.task" -ANGLE_SCORE = lambda x: -1/5*x+2 -DISCOUNT_FACTOR = 0.5 \ No newline at end of file diff --git a/legacy/object_interest_tracking/srv/Tdr.srv b/legacy/object_interest_tracking/srv/Tdr.srv deleted file mode 100644 index 73b314ff7..000000000 --- 
a/legacy/object_interest_tracking/srv/Tdr.srv +++ /dev/null @@ -1 +0,0 @@ ---- \ No newline at end of file diff --git a/legacy/lasr_web_server/src/lasr_web_server/__init__.py b/legacy/pcl_segmentation/CATKIN_IGNORE similarity index 100% rename from legacy/lasr_web_server/src/lasr_web_server/__init__.py rename to legacy/pcl_segmentation/CATKIN_IGNORE diff --git a/legacy/pcl_segmentation/package.xml b/legacy/pcl_segmentation/package.xml index b19b36a36..79753ad87 100644 --- a/legacy/pcl_segmentation/package.xml +++ b/legacy/pcl_segmentation/package.xml @@ -7,7 +7,7 @@ - jared + Jared Swift diff --git a/legacy/people_detection/__init__.py b/legacy/people_detection/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/legacy/people_detection/create_dataset/README.md b/legacy/people_detection/create_dataset/README.md deleted file mode 100644 index 5b2eb3449..000000000 --- a/legacy/people_detection/create_dataset/README.md +++ /dev/null @@ -1,47 +0,0 @@ -# create_dataset - -The create_dataset package - -This package is maintained by: -- [nicole](mailto:nicole@todo.todo) - -## Prerequisites - -This package depends on the following ROS packages: -- catkin (buildtool) -- rospy (build) -- std_msgs (build) -- rospy (exec) -- std_msgs (exec) - -Ask the package maintainer to write or create a blank `doc/PREREQUISITES.md` for their package! - -## Usage - -Ask the package maintainer to write a `doc/USAGE.md` for their package! - -## Example - -Ask the package maintainer to write a `doc/EXAMPLE.md` for their package! - -## Technical Overview - -Ask the package maintainer to write a `doc/TECHNICAL.md` for their package! - -## ROS Definitions - -### Launch Files - -This package has no launch files. - -### Messages - -This package has no messages. - -### Services - -This package has no services. - -### Actions - -This package has no actions. 
diff --git a/legacy/people_detection/create_dataset/package.xml b/legacy/people_detection/create_dataset/package.xml deleted file mode 100644 index eeea94592..000000000 --- a/legacy/people_detection/create_dataset/package.xml +++ /dev/null @@ -1,65 +0,0 @@ - - - create_dataset - 0.0.0 - The create_dataset package - - - - - nicole - - - - - - TODO - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - catkin - rospy - std_msgs - rospy - std_msgs - rospy - std_msgs - - - - - - - - diff --git a/legacy/people_detection/create_dataset/setup.py b/legacy/people_detection/create_dataset/setup.py deleted file mode 100644 index d18c5fb2c..000000000 --- a/legacy/people_detection/create_dataset/setup.py +++ /dev/null @@ -1,11 +0,0 @@ -#!/usr/bin/env python3 - -from distutils.core import setup -from catkin_pkg.python_setup import generate_distutils_setup - -setup_args = generate_distutils_setup( - packages=['create_dataset'], - package_dir={'': 'src'} -) - -setup(**setup_args) \ No newline at end of file diff --git a/legacy/people_detection/create_dataset/src/create_dataset/__init__.py b/legacy/people_detection/create_dataset/src/create_dataset/__init__.py deleted file mode 100644 index 9d80ae3aa..000000000 --- a/legacy/people_detection/create_dataset/src/create_dataset/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -from .create_dataset_state import CreateDatasetState -from .talk_create_dataset_state import TalkCreateDatasetState \ No newline at end of file diff --git a/legacy/people_detection/create_dataset/src/create_dataset/create_dataset_state.py b/legacy/people_detection/create_dataset/src/create_dataset/create_dataset_state.py deleted file mode 100755 index 198d6c71a..000000000 --- a/legacy/people_detection/create_dataset/src/create_dataset/create_dataset_state.py +++ /dev/null @@ -1,81 +0,0 @@ -#!/usr/bin/env python3 -# coding=utf-8 -import os -import shutil -import smach -import rospy -from sensor_msgs.msg import Image -import rospkg -from cv_bridge3 import CvBridge -from cv_bridge3 import cv2 - -import random -import string - - -IMAGE_NUMBER = 10 -SLEEP_TIME = 0.8 -MAX_DURATION = IMAGE_NUMBER*SLEEP_TIME + 2 - -class CreateDatasetState(smach.State): - ''' - Creates a dataset of a person and trains a model to redetect that individual. 
- To be able to train, the dataset must contain at least 2 people - ''' - def __init__(self): - smach.State.__init__(self, - outcomes=['finished_scan'], - output_keys=['dataset_path'] - ) - self.name = "" - self.path = "" - self.dir_path = self.path = os.path.join(rospkg.RosPack().get_path('create_dataset'), 'dataset') - self.bridge = CvBridge() - self.images_taken = 0 - self.semaphore = False - - # Choose topic - if rospy.get_published_topics(namespace='/xtion'): - self.topic = '/xtion/rgb/image_raw' - else: - self.topic = '/usb_cam/image_raw' - - self.sub = rospy.Subscriber(self.topic, Image, self.img_callback) - - def img_callback(self, msg): - if self.semaphore: - if rospy.Time.now().to_sec() - self.last_time.to_sec() >= SLEEP_TIME and self.images_taken < IMAGE_NUMBER: - cv_image = self.bridge.imgmsg_to_cv2_np(msg) - cv2.imwrite(os.path.join(self.path, f'{self.images_taken}.jpg'), cv_image) - self.images_taken += 1 - print("*" *30," - IMAGE IS TAKEN ", self.images_taken) - self.last_time = rospy.Time.now() - elif self.images_taken >= IMAGE_NUMBER or rospy.Time.now().to_sec() - self.start_time.to_sec() > MAX_DURATION: - self.semaphore = False - - def execute(self, userdata): - rand = ''.join(random.choice(string.ascii_lowercase) for _ in range(7)) - userdata.dataset_path = self.path = os.path.join(rospkg.RosPack().get_path('create_dataset'), 'dataset', rand) - - if os.path.isdir(self.path): - shutil.rmtree(self.path) - os.mkdir(self.path) - - self.semaphore = True - self.start_time = self.last_time = rospy.Time.now() - while self.semaphore: - pass - self.images_taken = 0 - return 'finished_scan' - - -if __name__ == '__main__': - rospy.init_node('smach_example_state_machine') - sm = smach.StateMachine(outcomes=['success']) - with sm: - smach.StateMachine.add('CREATE_DATASET', CreateDatasetState(), - transitions={'finished_scan':'success'}, - remapping={'dataset_path':'dataset_path'}) - outcome = sm.execute() - rospy.loginfo('I have completed execution with outcome: ') - rospy.loginfo(outcome) \ No newline at end of file diff --git a/legacy/people_detection/create_dataset/src/create_dataset/talk_create_dataset_state.py b/legacy/people_detection/create_dataset/src/create_dataset/talk_create_dataset_state.py deleted file mode 100755 index c0b3cbf4c..000000000 --- a/legacy/people_detection/create_dataset/src/create_dataset/talk_create_dataset_state.py +++ /dev/null @@ -1,55 +0,0 @@ -#!/usr/bin/env python3 -# coding=utf-8 -import rospy -import smach -import random -# from models import Person - -SLEEP_TIME = 5 - -class Person: - """ A person class that saves all the information about a person that the robot meets """ - - def __init__(self, name, fav_drink): - self.name = name - self.fav_drink = fav_drink - - - def get_fav_drink(self): - return self.fav_drink - - def __str__(self): - return ( - f"name : {self.name}\n" - f"fav drink : {self.fav_drink}\n" - ) -class TalkCreateDatasetState(smach.State): - def __init__(self): - smach.State.__init__(self, outcomes=['finished_collecting'], output_keys=['current_person']) - print("start") - self.random_names = ['belly', 'mimi', 'teo', 'pipi'] - self.random_drinks = ['vodka', 'cola', 'pepsi'] - - def execute(self, userdata): - - name = random.choice(self.random_names) - print("What is your name?",name) - - rospy.sleep(SLEEP_TIME) - - drink = random.choice(self.random_drinks) - print("What's your favourite drink?", drink) - - rospy.sleep(SLEEP_TIME) - - print(f"that's boring, i promise you will like {drink}") - userdata.current_person = Person(name, 
drink) - # userdata.current_guest = Guest(name, drink) - return 'finished_collecting' - # self.dialog_client.send_goal_and_wait(DialogGoal('receptionist')) - # name = rospy.get_param("/guest/name", "unknown") - # if name == "unknown": - # name = None - # # drink = rospy.get_param("/guest/drink", "unknown") - # # if drink == "unknown": - # drink = None \ No newline at end of file diff --git a/legacy/people_detection/recognise_people/CMakeLists.txt b/legacy/people_detection/recognise_people/CMakeLists.txt deleted file mode 100644 index 0eaf4e41a..000000000 --- a/legacy/people_detection/recognise_people/CMakeLists.txt +++ /dev/null @@ -1,208 +0,0 @@ -cmake_minimum_required(VERSION 3.0.2) -project(recognise_people) - -## Compile as C++11, supported in ROS Kinetic and newer -# add_compile_options(-std=c++11) - -## Find catkin macros and libraries -## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz) -## is used, also find other catkin packages -find_package(catkin REQUIRED COMPONENTS - rospy - std_msgs - sensor_msgs - message_runtime - message_generation - lasr_vision_msgs -) - -## System dependencies are found with CMake's conventions -# find_package(Boost REQUIRED COMPONENTS system) - -## Uncomment this if the package has a setup.py. This macro ensures -## modules and global scripts declared therein get installed -## See http://ros.org/doc/api/catkin/html/user_guide/setup_dot_py.html -catkin_python_setup() - -################################################ -## Declare ROS messages, services and actions ## -################################################ - -## To declare and build messages, services or actions from within this -## package, follow these steps: -## * Let MSG_DEP_SET be the set of packages whose message types you use in -## your messages/services/actions (e.g. std_msgs, actionlib_msgs, ...). -## * In the file package.xml: -## * add a build_depend tag for "message_generation" -## * add a build_depend and a exec_depend tag for each package in MSG_DEP_SET -## * If MSG_DEP_SET isn't empty the following dependency has been pulled in -## but can be declared for certainty nonetheless: -## * add a exec_depend tag for "message_runtime" -## * In this file (CMakeLists.txt): -## * add "message_generation" and every package in MSG_DEP_SET to -## find_package(catkin REQUIRED COMPONENTS ...) -## * add "message_runtime" and every package in MSG_DEP_SET to -## catkin_package(CATKIN_DEPENDS ...) -## * uncomment the add_*_files sections below as needed -## and list every .msg/.srv/.action file to be processed -## * uncomment the generate_messages entry below -## * add every package in MSG_DEP_SET to generate_messages(DEPENDENCIES ...) 
- -## Generate messages in the 'msg' folder -# add_message_files( -# FILES -# Detection.msg -# ) - -## Generate services in the 'srv' folder -add_service_files( - FILES - RecognisePeople.srv -) - -## Generate actions in the 'action' folder -# add_action_files( -# FILES -# Action1.action -# Action2.action -# ) - -## Generate added messages and services with any dependencies listed here -generate_messages( - DEPENDENCIES - std_msgs - sensor_msgs - lasr_vision_msgs -) - -################################################ -## Declare ROS dynamic reconfigure parameters ## -################################################ - -## To declare and build dynamic reconfigure parameters within this -## package, follow these steps: -## * In the file package.xml: -## * add a build_depend and a exec_depend tag for "dynamic_reconfigure" -## * In this file (CMakeLists.txt): -## * add "dynamic_reconfigure" to -## find_package(catkin REQUIRED COMPONENTS ...) -## * uncomment the "generate_dynamic_reconfigure_options" section below -## and list every .cfg file to be processed - -## Generate dynamic reconfigure parameters in the 'cfg' folder -# generate_dynamic_reconfigure_options( -# cfg/DynReconf1.cfg -# cfg/DynReconf2.cfg -# ) - -################################### -## catkin specific configuration ## -################################### -## The catkin_package macro generates cmake config files for your package -## Declare things to be passed to dependent projects -## INCLUDE_DIRS: uncomment this if your package contains header files -## LIBRARIES: libraries you create in this project that dependent projects also need -## CATKIN_DEPENDS: catkin_packages dependent projects also need -## DEPENDS: system dependencies of this project that dependent projects also need -catkin_package( -# INCLUDE_DIRS include -# LIBRARIES lasr_object_detection_yolo -# CATKIN_DEPENDS rospy std_msgs -# DEPENDS system_lib -) - -########### -## Build ## -########### - -## Specify additional locations of header files -## Your package locations should be listed before other locations -include_directories( -# include - ${catkin_INCLUDE_DIRS} -) - -## Declare a C++ library -# add_library(${PROJECT_NAME} -# src/${PROJECT_NAME}/lasr_object_detection_yolo.cpp -# ) - -## Add cmake target dependencies of the library -## as an example, code may need to be generated before libraries -## either from message generation or dynamic reconfigure -# add_dependencies(${PROJECT_NAME} ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS}) - -## Declare a C++ executable -## With catkin_make all packages are built within a single CMake context -## The recommended prefix ensures that target names across packages don't collide -# add_executable(${PROJECT_NAME}_node src/lasr_object_detection_yolo_node.cpp) - -## Rename C++ executable without prefix -## The above recommended prefix causes long target names, the following renames the -## target back to the shorter version for ease of user use -## e.g. 
"rosrun someones_pkg node" instead of "rosrun someones_pkg someones_pkg_node" -# set_target_properties(${PROJECT_NAME}_node PROPERTIES OUTPUT_NAME node PREFIX "") - -## Add cmake target dependencies of the executable -## same as for the library above -# add_dependencies(${PROJECT_NAME}_node ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS}) - -## Specify libraries to link a library or executable target against -# target_link_libraries(${PROJECT_NAME}_node -# ${catkin_LIBRARIES} -# ) - -############# -## Install ## -############# - -# all install targets should use catkin DESTINATION variables -# See http://ros.org/doc/api/catkin/html/adv_user_guide/variables.html - -## Mark executable scripts (Python etc.) for installation -## in contrast to setup.py, you can choose the destination -# catkin_install_python(PROGRAMS -# scripts/my_python_script -# DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} -# ) - -## Mark executables for installation -## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_executables.html -# install(TARGETS ${PROJECT_NAME}_node -# RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} -# ) - -## Mark libraries for installation -## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_libraries.html -# install(TARGETS ${PROJECT_NAME} -# ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} -# LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} -# RUNTIME DESTINATION ${CATKIN_GLOBAL_BIN_DESTINATION} -# ) - -## Mark cpp header files for installation -# install(DIRECTORY include/${PROJECT_NAME}/ -# DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION} -# FILES_MATCHING PATTERN "*.h" -# PATTERN ".svn" EXCLUDE -# ) - -## Mark other files for installation (e.g. launch and bag files, etc.) -# install(FILES -# # myfile1 -# # myfile2 -# DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION} -# ) - -############# -## Testing ## -############# - -## Add gtest based cpp test target and link libraries -# catkin_add_gtest(${PROJECT_NAME}-test test/test_lasr_object_detection_yolo.cpp) -# if(TARGET ${PROJECT_NAME}-test) -# target_link_libraries(${PROJECT_NAME}-test ${PROJECT_NAME}) -# endif() - -## Add folders to be run by python nosetests -# catkin_add_nosetests(test) diff --git a/legacy/people_detection/recognise_people/README.md b/legacy/people_detection/recognise_people/README.md deleted file mode 100644 index 4351c3394..000000000 --- a/legacy/people_detection/recognise_people/README.md +++ /dev/null @@ -1,62 +0,0 @@ -# recognise_people - -The recognise_people package - -This package is maintained by: -- [nicole](mailto:nicole@todo.todo) - -## Prerequisites - -This package depends on the following ROS packages: -- catkin (buildtool) -- rospy (build) -- std_msgs (build) -- rospy (exec) -- std_msgs (exec) -- lasr_vision_msgs - -Ask the package maintainer to write or create a blank `doc/PREREQUISITES.md` for their package! - -## Usage - -Ask the package maintainer to write a `doc/USAGE.md` for their package! - -## Example - -Ask the package maintainer to write a `doc/EXAMPLE.md` for their package! - -## Technical Overview - -Ask the package maintainer to write a `doc/TECHNICAL.md` for their package! - -## ROS Definitions - -### Launch Files - -This package has no launch files. - -### Messages - -This package has no messages. 
- -### Services - -#### `RecognisePeople` - -Request - -| Field | Type | Description | -|:-:|:-:|---| -| detected_objects_yolo | lasr_vision_msgs/Detection[] | | -| detected_objects_opencv | lasr_vision_msgs/Detection[] | | - -Response - -| Field | Type | Description | -|:-:|:-:|---| -| detected_objects | lasr_vision_msgs/Detection[] | | - - -### Actions - -This package has no actions. diff --git a/legacy/people_detection/recognise_people/package.xml b/legacy/people_detection/recognise_people/package.xml deleted file mode 100644 index 90bdd6695..000000000 --- a/legacy/people_detection/recognise_people/package.xml +++ /dev/null @@ -1,66 +0,0 @@ - - - recognise_people - 0.0.0 - The recognise_people package - - - - - nicole - - - - - - TODO - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - catkin - rospy - std_msgs - rospy - std_msgs - rospy - std_msgs - lasr_vision_msgs - - - - - - - - \ No newline at end of file diff --git a/legacy/people_detection/recognise_people/setup.py b/legacy/people_detection/recognise_people/setup.py deleted file mode 100644 index aca85b46a..000000000 --- a/legacy/people_detection/recognise_people/setup.py +++ /dev/null @@ -1,11 +0,0 @@ -#!/usr/bin/env python3 - -from distutils.core import setup -from catkin_pkg.python_setup import generate_distutils_setup - -setup_args = generate_distutils_setup( - packages=['recognise_people'], - package_dir={'': 'src'} -) - -setup(**setup_args) \ No newline at end of file diff --git a/legacy/people_detection/recognise_people/src/recognise_people/recognise_people_srv.py b/legacy/people_detection/recognise_people/src/recognise_people/recognise_people_srv.py deleted file mode 100755 index c4a48c862..000000000 --- a/legacy/people_detection/recognise_people/src/recognise_people/recognise_people_srv.py +++ /dev/null @@ -1,117 +0,0 @@ -#!/usr/bin/env python3 - -from recognise_people.srv import RecognisePeople, RecognisePeopleResponse -import rospy -from cv_bridge3 import CvBridge -from lasr_vision_msgs.msg import Detection - -from cv_bridge3 import cv2 -import random, os, rospkg - -class RecognisePeopleServer(): - """ - A Server for recognising known people using yolo and opencv - """ - - def __init__(self): - - self.face_detections = [] - self.yolo_detections = [] - self.bridge = CvBridge() - - - #todo: get known people form rosparam or another funct - - def recogniser(self, req): - response = RecognisePeopleResponse() - - self.yolo_detections = req.detected_objects_yolo - self.face_detections = req.detected_objects_opencv - detection_map = {} - # print(self.face_detections[0].name) - # print('-'*40) - # print(self.yolo_detections[0].name) - i = 0 - if len(self.yolo_detections) < 1 and len(self.face_detections) < 1: - print('empty resp') - return response - - # get bb of person using face bb and body bb - # maps name -> face(name, confidence, xywh) and yolo(name, conf, xywh) - for face in self.face_detections: - for yolo in self.yolo_detections: - x1, y1, x2, y2 = face.xywh - print(yolo, 'the yoloooo ') - - head_x = (x1 + x2) / 2 - body_x = (yolo.xywh[0] + yolo.xywh[2]) / 2 # center of the body in the x axis - # self.image_show(yolo.name, yolo.confidence,yolo.xywh, i) - i = i+1 - - if abs(head_x - body_x) < 20: - if not face.name in detection_map.keys(): - print('detection map', face, yolo) - detection_map[face.name] = (face, yolo) - print("yolo detection here, bb overlapped.") - break - # if newer detection has bigger confidence - if yolo.confidence > detection_map[face.name][1].confidence: - 
detection_map[face.name] = (face, yolo) - else: - print(face.name, face.xywh, yolo.xywh) - - print('-'* 40) - print(detection_map, 'detection map\n') - - if len(self.face_detections) > len(self.yolo_detections) and len(self.face_detections) > 0: - for face in self.face_detections: - # Append detection - response.detected_objects.append( - Detection( - name=face.name, - confidence=face.confidence, - xywh=face.xywh - ) - ) - print('the face recognitons are', face) - elif len(self.face_detections) < 1 and len(self.yolo_detections) > 0: - for person in self.yolo_detections: - # Append detection. - response.detected_objects.append( - Detection( - name=person.name, - confidence=person.confidence, - xywh=person.xywh - ) - ) - else: - response = [] - - print(response) - return response - - def image_show(self, name, proba, dim, i): - x1, y1, x2, y2 = dim - path_output = os.path.join(rospkg.RosPack().get_path('face_detection'), "output") - image = cv2.imread(path_output + "/images/random.jpg",0) - # draw the bounding box of the face along with the associated - # probability - text = "{}: {:.2f}%".format(name, proba * 100) - y = y1 - 10 if y1 - 10 > 10 else y1 + 10 - cv2.rectangle(image, (x1, y1), (x2+x1, y2+y1), - (0, 0, 255), 2) - cv2.putText(image, text, (x1, y), - cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 0, 255), 2) - # show the output image - - bridge = CvBridge() - # cv_image = bridge.imgmsg_to_cv2(image, desired_encoding='passthrough') - cv2.imwrite(path_output + "/images/random" + str(i+1)+".jpg", image) - - -if __name__ == "__main__": - rospy.init_node("recognise_people_server") - server = RecognisePeopleServer() - service = rospy.Service('recognise_people_server', RecognisePeople, server.recogniser) - rospy.loginfo("Recognise People Service initialised") - rospy.spin() diff --git a/legacy/people_detection/recognise_people/src/recognise_people/recognise_state.py b/legacy/people_detection/recognise_people/src/recognise_people/recognise_state.py deleted file mode 100755 index 299bb952f..000000000 --- a/legacy/people_detection/recognise_people/src/recognise_people/recognise_state.py +++ /dev/null @@ -1,135 +0,0 @@ -#!/usr/bin/env python3 -# coding=utf-8 -import os -import shutil -import rospy -from sensor_msgs.msg import Image, PointCloud2 -import rospkg -import sys -from face_detection.recognize import recognize -from face_detection.train_model import train -from cv_bridge3 import CvBridge -from cv_bridge3 import cv2 -import smach -import itertools - -from robocup_receptionist.utils.servers_clients import detect_person -from robocup_receptionist.helpers import get_percentage_overlap, get_corners - -from face_detection.srv import FaceDetection, FaceDetectionPCL - -''' - Redetects people in the robot's view using a trained model. 
-''' -## TODO: refactor - -class RedetectState(smach.State): - def __init__(self, head_controller, torso_controller): - smach.State.__init__(self, input_keys=["guest_list"], output_keys=['introduction_combinations', "guest_list"], - outcomes=['people_recognized', 'no_people_recognized']) - self.bridge = CvBridge() - self.head_controller = head_controller - self.torso_controller = torso_controller - - if self.head_controller: - self.rotate_head = self.head_controller.sync_reach_to - else: - self.rotate_head = lambda *args, **kwargs: None - - # Use PointCloud2 - if rospy.get_published_topics(namespace='/xtion'): - rospy.wait_for_service("face_detection_pcl") - self.face_detection = rospy.ServiceProxy("face_detection_pcl", FaceDetectionPCL) - self.topic = '/xtion/depth_registered/points' - self.dtype = PointCloud2 - else: - # Use Image - rospy.wait_for_service("people_detection") - self.face_detection = rospy.ServiceProxy("people_detection", FaceDetection) - self.topic = '/usb_cam/image_raw' - self.dtype = Image - - def execute(self, userdata): - print(userdata.guest_list) - direction = 0.2 - - face_detections = [] - - yolo_detections = [] - - detection_map = {} - - self.torso_controller.sync_raise(0.4) - - for _ in range(4): - direction *= -1 - self.rotate_head(direction, -0.2, velocities=[0.1, 0.0]) - - pcl_msg = rospy.wait_for_message("/xtion/depth_registered/points", PointCloud2) - face_detection_resp = self.face_detection(pcl_msg) - yolo_detections.extend(detect_person()) - - for detection in face_detection_resp.detections: - try: - n = [d.name for d in face_detections].index(detection.name) - if detection.confidence > face_detections[n].confidence: - face_detections[n] = detection - except ValueError: - face_detections.append(detection) - - # get bb of person using face bb and body bb - for face in face_detections: - for detection in yolo_detections: - x1, y1, x2, y2 = face.bb - - head_x = (x1 + x2) / 2 # center of head in x-axis - body_x = (detection.xywh[0] + detection.xywh[2]) / 2 # center of body in x-axis - - if abs(head_x - body_x) < 20: - if not face.name in detection_map.keys(): - detection_map[face.name] = (face, detection) - print("yolo detection here, bb overlapped.") - break - if detection.confidence > detection_map[face.name][1].confidence: - detection_map[face.name] = (face, detection) - else: - print(face.name, face.bb, detection.xywh) - - intersect = [] - if len(detection_map.keys()): - for face_detection, yolo_detection in detection_map.values(): - print(f"Checking {face_detection.name}") - try: - n = [g.name for g in userdata.guest_list].index(face_detection.name) - userdata.guest_list[n].last_known_pose = yolo_detection.centroid - intersect.append(userdata.guest_list[n]) - except ValueError: - continue - print(intersect) - - # Create list of combinations of pairs of guests. - comb = list(itertools.combinations([g for g in intersect], 2)) - - # Sort the combinations. - comb = sorted(comb, key=lambda element: (element[0].name, element[1].name)) - - # Insert the reverse of each pair, ensuring the sorting critera is maintained. 
- comb = [a for pair in [(c, c[::-1]) for c in comb] for a in pair] - - print(comb) - userdata.introduction_combinations = comb - return 'people_recognized' - else: - return 'no_people_recognized' - - -if __name__ == '__main__': - rospy.init_node('smach_example_state_machine') - sm = smach.StateMachine(outcomes=['success']) - with sm: - smach.StateMachine.add('REDETECT', RedetectState(), - transitions={'finished_redetection': 'success'}, - remapping={'out_redetected_people': 'redetected_people'}) - outcome = sm.execute() - rospy.loginfo('I have completed execution with outcome: ') - rospy.loginfo(outcome) \ No newline at end of file diff --git a/legacy/people_detection/recognise_people/srv/RecognisePeople.srv b/legacy/people_detection/recognise_people/srv/RecognisePeople.srv deleted file mode 100644 index ec43502a4..000000000 --- a/legacy/people_detection/recognise_people/srv/RecognisePeople.srv +++ /dev/null @@ -1,4 +0,0 @@ -lasr_vision_msgs/Detection[] detected_objects_yolo -lasr_vision_msgs/Detection[] detected_objects_opencv ---- -lasr_vision_msgs/Detection[] detected_objects diff --git a/legacy/people_detection/train_dataset_model/CMakeLists.txt b/legacy/people_detection/train_dataset_model/CMakeLists.txt deleted file mode 100644 index 5088a7e79..000000000 --- a/legacy/people_detection/train_dataset_model/CMakeLists.txt +++ /dev/null @@ -1,205 +0,0 @@ -cmake_minimum_required(VERSION 3.0.2) -project(train_dataset_model) - -## Compile as C++11, supported in ROS Kinetic and newer -# add_compile_options(-std=c++11) - -## Find catkin macros and libraries -## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz) -## is used, also find other catkin packages -find_package(catkin REQUIRED COMPONENTS - rospy - std_msgs -) - -## System dependencies are found with CMake's conventions -# find_package(Boost REQUIRED COMPONENTS system) - - -## Uncomment this if the package has a setup.py. This macro ensures -## modules and global scripts declared therein get installed -## See http://ros.org/doc/api/catkin/html/user_guide/setup_dot_py.html - catkin_python_setup() - -################################################ -## Declare ROS messages, services and actions ## -################################################ - -## To declare and build messages, services or actions from within this -## package, follow these steps: -## * Let MSG_DEP_SET be the set of packages whose message types you use in -## your messages/services/actions (e.g. std_msgs, actionlib_msgs, ...). -## * In the file package.xml: -## * add a build_depend tag for "message_generation" -## * add a build_depend and a exec_depend tag for each package in MSG_DEP_SET -## * If MSG_DEP_SET isn't empty the following dependency has been pulled in -## but can be declared for certainty nonetheless: -## * add a exec_depend tag for "message_runtime" -## * In this file (CMakeLists.txt): -## * add "message_generation" and every package in MSG_DEP_SET to -## find_package(catkin REQUIRED COMPONENTS ...) -## * add "message_runtime" and every package in MSG_DEP_SET to -## catkin_package(CATKIN_DEPENDS ...) -## * uncomment the add_*_files sections below as needed -## and list every .msg/.srv/.action file to be processed -## * uncomment the generate_messages entry below -## * add every package in MSG_DEP_SET to generate_messages(DEPENDENCIES ...) 
- -## Generate messages in the 'msg' folder -# add_message_files( -# FILES -# Message1.msg -# Message2.msg -# ) - -## Generate services in the 'srv' folder -# add_service_files( -# FILES -# Service1.srv -# Service2.srv -# ) - -## Generate actions in the 'action' folder -# add_action_files( -# FILES -# Action1.action -# Action2.action -# ) - -## Generate added messages and services with any dependencies listed here -# generate_messages( -# DEPENDENCIES -# std_msgs -# ) - -################################################ -## Declare ROS dynamic reconfigure parameters ## -################################################ - -## To declare and build dynamic reconfigure parameters within this -## package, follow these steps: -## * In the file package.xml: -## * add a build_depend and a exec_depend tag for "dynamic_reconfigure" -## * In this file (CMakeLists.txt): -## * add "dynamic_reconfigure" to -## find_package(catkin REQUIRED COMPONENTS ...) -## * uncomment the "generate_dynamic_reconfigure_options" section below -## and list every .cfg file to be processed - -## Generate dynamic reconfigure parameters in the 'cfg' folder -# generate_dynamic_reconfigure_options( -# cfg/DynReconf1.cfg -# cfg/DynReconf2.cfg -# ) - -################################### -## catkin specific configuration ## -################################### -## The catkin_package macro generates cmake config files for your package -## Declare things to be passed to dependent projects -## INCLUDE_DIRS: uncomment this if your package contains header files -## LIBRARIES: libraries you create in this project that dependent projects also need -## CATKIN_DEPENDS: catkin_packages dependent projects also need -## DEPENDS: system dependencies of this project that dependent projects also need -catkin_package( -# INCLUDE_DIRS include -# LIBRARIES train_dataset_model -# CATKIN_DEPENDS rospy std_msgs -# DEPENDS system_lib -) - -########### -## Build ## -########### - -## Specify additional locations of header files -## Your package locations should be listed before other locations -include_directories( -# include - ${catkin_INCLUDE_DIRS} -) - -## Declare a C++ library -# add_library(${PROJECT_NAME} -# src/${PROJECT_NAME}/train_dataset_model.cpp -# ) - -## Add cmake target dependencies of the library -## as an example, code may need to be generated before libraries -## either from message generation or dynamic reconfigure -# add_dependencies(${PROJECT_NAME} ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS}) - -## Declare a C++ executable -## With catkin_make all packages are built within a single CMake context -## The recommended prefix ensures that target names across packages don't collide -# add_executable(${PROJECT_NAME}_node src/train_dataset_model_node.cpp) - -## Rename C++ executable without prefix -## The above recommended prefix causes long target names, the following renames the -## target back to the shorter version for ease of user use -## e.g. 
"rosrun someones_pkg node" instead of "rosrun someones_pkg someones_pkg_node" -# set_target_properties(${PROJECT_NAME}_node PROPERTIES OUTPUT_NAME node PREFIX "") - -## Add cmake target dependencies of the executable -## same as for the library above -# add_dependencies(${PROJECT_NAME}_node ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS}) - -## Specify libraries to link a library or executable target against -# target_link_libraries(${PROJECT_NAME}_node -# ${catkin_LIBRARIES} -# ) - -############# -## Install ## -############# - -# all install targets should use catkin DESTINATION variables -# See http://ros.org/doc/api/catkin/html/adv_user_guide/variables.html - -## Mark executable scripts (Python etc.) for installation -## in contrast to setup.py, you can choose the destination -# catkin_install_python(PROGRAMS -# scripts/my_python_script -# DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} -# ) - -## Mark executables for installation -## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_executables.html -# install(TARGETS ${PROJECT_NAME}_node -# RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} -# ) - -## Mark libraries for installation -## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_libraries.html -# install(TARGETS ${PROJECT_NAME} -# ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} -# LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} -# RUNTIME DESTINATION ${CATKIN_GLOBAL_BIN_DESTINATION} -# ) - -## Mark cpp header files for installation -# install(DIRECTORY include/${PROJECT_NAME}/ -# DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION} -# FILES_MATCHING PATTERN "*.h" -# PATTERN ".svn" EXCLUDE -# ) - -## Mark other files for installation (e.g. launch and bag files, etc.) -# install(FILES -# # myfile1 -# # myfile2 -# DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION} -# ) - -############# -## Testing ## -############# - -## Add gtest based cpp test target and link libraries -# catkin_add_gtest(${PROJECT_NAME}-test test/test_train_dataset_model.cpp) -# if(TARGET ${PROJECT_NAME}-test) -# target_link_libraries(${PROJECT_NAME}-test ${PROJECT_NAME}) -# endif() - -## Add folders to be run by python nosetests -# catkin_add_nosetests(test) diff --git a/legacy/people_detection/train_dataset_model/README.md b/legacy/people_detection/train_dataset_model/README.md deleted file mode 100644 index c7217078b..000000000 --- a/legacy/people_detection/train_dataset_model/README.md +++ /dev/null @@ -1,71 +0,0 @@ -# train_dataset_model - -The train_dataset_model package - -This package is maintained by: -- [nicole](mailto:nicole@todo.todo) - -## Prerequisites - -This package depends on the following ROS packages: -- catkin (buildtool) -- rospy (build) -- std_msgs (build) -- rospy (exec) -- std_msgs (exec) - -Ask the package maintainer to write or create a blank `doc/PREREQUISITES.md` for their package! - -## Usage - -The train model state has the following file : - -1. train model => simply call a method to train -2. extract embeddings -3. train model state => i currently use this state to train - -on line 19 and line 20 you have to set some variables -> i will work to make it easier in the future - -when you have run the create_dataset: - -```python -rosrun create_dataset create_dataset_state.py -``` - -and you have a folder with a random name for instance 'wfeigq' -then you set that folder name in _line 20_ in train_model_state.py -on _line 19_ you set the name that you want to recognise the person with. 
- -I currently take 10 photos and recognise 5 people: - -1. flatmate -2. american -3. elisabeth -4. gerard -5. matteo - -## Example - -Ask the package maintainer to write a `doc/EXAMPLE.md` for their package! - -## Technical Overview - -Ask the package maintainer to write a `doc/TECHNICAL.md` for their package! - -## ROS Definitions - -### Launch Files - -This package has no launch files. - -### Messages - -This package has no messages. - -### Services - -This package has no services. - -### Actions - -This package has no actions. diff --git a/legacy/people_detection/train_dataset_model/__init__.py b/legacy/people_detection/train_dataset_model/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/legacy/people_detection/train_dataset_model/doc/USAGE.md b/legacy/people_detection/train_dataset_model/doc/USAGE.md deleted file mode 100644 index 412c3cee1..000000000 --- a/legacy/people_detection/train_dataset_model/doc/USAGE.md +++ /dev/null @@ -1,25 +0,0 @@ -The train model state has the following file : - -1. train model => simply call a method to train -2. extract embeddings -3. train model state => i currently use this state to train - -on line 19 and line 20 you have to set some variables -> i will work to make it easier in the future - -when you have run the create_dataset: - -```python -rosrun create_dataset create_dataset_state.py -``` - -and you have a folder with a random name for instance 'wfeigq' -then you set that folder name in _line 20_ in train_model_state.py -on _line 19_ you set the name that you want to recognise the person with. - -I currently take 10 photos and recognise 5 people: - -1. flatmate -2. american -3. elisabeth -4. gerard -5. matteo diff --git a/legacy/people_detection/train_dataset_model/package.xml b/legacy/people_detection/train_dataset_model/package.xml deleted file mode 100644 index 5e35508b3..000000000 --- a/legacy/people_detection/train_dataset_model/package.xml +++ /dev/null @@ -1,65 +0,0 @@ - - - train_dataset_model - 0.0.0 - The train_dataset_model package - - - - - nicole - - - - - - TODO - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - catkin - rospy - std_msgs - rospy - std_msgs - rospy - std_msgs - - - - - - - - diff --git a/legacy/people_detection/train_dataset_model/src/__init__.py b/legacy/people_detection/train_dataset_model/src/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/legacy/people_detection/train_dataset_model/src/train_dataset_model/__init__.py b/legacy/people_detection/train_dataset_model/src/train_dataset_model/__init__.py deleted file mode 100644 index 1bdbcae27..000000000 --- a/legacy/people_detection/train_dataset_model/src/train_dataset_model/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .train_model_state import TrainModelState \ No newline at end of file diff --git a/legacy/people_detection/train_dataset_model/src/train_dataset_model/extract_embeddings.py b/legacy/people_detection/train_dataset_model/src/train_dataset_model/extract_embeddings.py deleted file mode 100644 index 52d6b0494..000000000 --- a/legacy/people_detection/train_dataset_model/src/train_dataset_model/extract_embeddings.py +++ /dev/null @@ -1,105 +0,0 @@ -#!/usr/bin/env python3 -# import the necessary packages -import numpy as np -import argparse -import imutils -from imutils import paths -import pickle -import cv2 -import os -import rospkg - - -# Responsible for using a deep learning feature extractor to generate a 128-D vector describing a face. 
All faces in -# our dataset will be passed through the neural network to generate embeddings. - -def extract(): - # load our serialized face detector from disk - path_face_detection_model = os.path.join(rospkg.RosPack().get_path('face_detection'), 'caffe_model') - path_nn = os.path.join(rospkg.RosPack().get_path('face_detection'), 'nn4', "nn4.small2.v1.t7") - path_dataset = os.path.join(rospkg.RosPack().get_path('create_dataset'), "dataset") - path_output = os.path.join(rospkg.RosPack().get_path('face_detection'), "output") - - print("[INFO] loading face detector...") - proto_path = os.path.join(path_face_detection_model, "deploy.prototxt") - model_path = os.path.join(path_face_detection_model, - "res10_300x300_ssd_iter_140000.caffemodel") - detector = cv2.dnn.readNetFromCaffe(proto_path, model_path) - # load our serialized face embedding model from disk - print("[INFO] loading face recognizer...") - embedder = cv2.dnn.readNetFromTorch(path_nn) - - # grab the paths to the input images in our dataset - print("[INFO] quantifying faces...") - image_paths = list(paths.list_images(path_dataset)) - print(image_paths) - # initialize our lists of extracted facial embeddings and - # corresponding people names - known_embeddings = [] - known_names = [] - # initialize the total number of faces processed - total = 0 - - # loop over the image paths - for (i, imagePath) in enumerate(image_paths): - # extract the person name from the image path - print("[INFO] processing image {}/{}".format(i + 1, - len(image_paths))) - name = imagePath.split(os.path.sep)[-2] - # load the image, resize it to have a width of 600 pixels (while - # maintaining the aspect ratio), and then grab the image - # dimensions - image = cv2.imread(imagePath) - print(imagePath) - image = imutils.resize(image, width=600) - (h, w) = image.shape[:2] - - # construct a blob from the image - image_blob = cv2.dnn.blobFromImage( - cv2.resize(image, (300, 300)), 1.0, (300, 300), - (104.0, 177.0, 123.0), swapRB=False, crop=False) - # apply OpenCV's deep learning-based face detector to localize - # faces in the input image - detector.setInput(image_blob) - detections = detector.forward() - # ensure at least one face was found - if len(detections) > 0: - # we're making the assumption that each image has only ONE - # face, so find the bounding box with the largest probability - i = np.argmax(detections[0, 0, :, 2]) - confidence = detections[0, 0, i, 2] - # ensure that the detection with the largest probability also - # means our minimum probability test (thus helping filter out - # weak detections) - if confidence > 0.5: - # compute the (x, y)-coordinates of the bounding box for - # the face - box = detections[0, 0, i, 3:7] * np.array([w, h, w, h]) - (startX, startY, endX, endY) = box.astype("int") - - # extract the face ROI and grab the ROI dimensions - face = image[startY:endY, startX:endX] - (fH, fW) = face.shape[:2] - # ensure the face width and height are sufficiently large - if fW < 20 or fH < 20: - continue - # construct a blob for the face ROI, then pass the blob - # through our face embedding model to obtain the 128-d - # quantification of the face - faceBlob = cv2.dnn.blobFromImage(face, 1.0 / 255, - (96, 96), (0, 0, 0), swapRB=True, crop=False) - embedder.setInput(faceBlob) - vec = embedder.forward() - # add the name of the person + corresponding face - # embedding to their respective lists - known_names.append(name) - - known_embeddings.append(vec.flatten()) - total += 1 - # dump the facial embeddings + names to disk - print("[INFO] 
serializing {} encodings...".format(total)) - print('YO I KNOW THESE PEEPS: ', known_names) - data = {"embeddings": known_embeddings, "names": known_names} - f = open(path_output + "/embeddings.pickle", "wb") - f.write(pickle.dumps(data)) - f.close() \ No newline at end of file diff --git a/legacy/people_detection/train_dataset_model/src/train_dataset_model/train_model.py b/legacy/people_detection/train_dataset_model/src/train_dataset_model/train_model.py deleted file mode 100644 index 2096b13b0..000000000 --- a/legacy/people_detection/train_dataset_model/src/train_dataset_model/train_model.py +++ /dev/null @@ -1,39 +0,0 @@ -from sklearn.preprocessing import LabelEncoder -from sklearn.svm import SVC -import pickle -import os -import rospkg - -# Detect faces, extract embeddings, and fit our SVM model to the embeddings data. - -# construct the argument parser and parse the arguments -# load the face embeddings -def train_model(): - # self.path = os.path.join(rospkg.RosPack().get_path('people_detection'), 'dataset', - # self.name) - path = os.path.join(rospkg.RosPack().get_path('face_detection'), 'output', 'embeddings.pickle') - path_output = os.path.join(rospkg.RosPack().get_path('face_detection'), 'output') - - print("[INFO] loading face embeddings...") - print(open(path, "rb").read()) - data = pickle.loads(open(path, "rb").read()) - # encode the labels - print("[INFO] encoding labels...") - le = LabelEncoder() - labels = le.fit_transform(data["names"]) - - # train the model used to accept the 128-d embeddings of the face and - # then produce the actual face recognition - print("[INFO] training model...") - recognizer = SVC(C=1.0, kernel="linear", probability=True) - recognizer.fit(data["embeddings"], labels) - - # write the actual face recognition model to disk - f = open(os.path.join(path_output, "recognizer.pickle"), "wb") - f.write(pickle.dumps(recognizer)) - f.close() - # write the label encoder to disk - f = open(os.path.join(path_output, "le.pickle"), "wb") - f.write(pickle.dumps(le)) - f.close() - diff --git a/legacy/people_detection/train_dataset_model/src/train_dataset_model/train_model_state.py b/legacy/people_detection/train_dataset_model/src/train_dataset_model/train_model_state.py deleted file mode 100755 index 74adb1c11..000000000 --- a/legacy/people_detection/train_dataset_model/src/train_dataset_model/train_model_state.py +++ /dev/null @@ -1,48 +0,0 @@ -#!/usr/bin/env python3 -import rospy -import os, rospkg -import shutil -import smach -from extract_embeddings import extract -from train_model import train_model as train - - -class TrainModelState(smach.State): - def __init__(self): - smach.State.__init__(self, - outcomes=['finished_training', 'training_failed'], - input_keys=['current_person', 'dataset_path'], # ! comment for tesing purposes - output_keys=['current_person']) # ! 
comment for tesing purposes - - - def execute(self, userdata): - name = 'matteo' - dataset_path = os.path.join(rospkg.RosPack().get_path('create_dataset'), 'dataset', 'mateo') - new = dataset_path.replace(dataset_path.split(os.path.sep)[-1], name) - if os.path.exists(new): - shutil.rmtree(new) - - os.renames(dataset_path, - dataset_path.replace(dataset_path.split(os.path.sep)[-1], name)) - - if len(next(os.walk(os.path.dirname(dataset_path)))[1]) > 1: - extract() - train() - return 'finished_training' - else: - rospy.logwarn("At least 2 datasets are required for training.") - return 'training_failed' - -if __name__ == '__main__': - rospy.init_node('smach_example_state_machine') - name = 'alice' - dataset_path = os.path.join(rospkg.RosPack().get_path('create_dataset'), 'dataset', 'alice') - sm = smach.StateMachine(outcomes=['success', 'failed']) - with sm: - smach.StateMachine.add('TRAIN_MODEL', TrainModelState(), - transitions={'finished_training':'success', 'training_failed':'failed'}, - remapping={'dataset_path':'dataset_path', - 'current_person':'current_person'}) - outcome = sm.execute() - rospy.loginfo('I have completed execution with outcome: ') - rospy.loginfo(outcome) diff --git a/legacy/listen_module/__init__.py b/legacy/read_pcl_info/CATKIN_IGNORE similarity index 100% rename from legacy/listen_module/__init__.py rename to legacy/read_pcl_info/CATKIN_IGNORE diff --git a/skills/CMakeLists.txt b/skills/CMakeLists.txt index cc256ca3d..6a5978ed6 100644 --- a/skills/CMakeLists.txt +++ b/skills/CMakeLists.txt @@ -10,6 +10,7 @@ project(lasr_skills) find_package(catkin REQUIRED COMPONENTS rospy lasr_vision_msgs + lasr_vector_databases_msgs ) ## System dependencies are found with CMake's conventions diff --git a/skills/config/demo.yaml b/skills/config/demo.yaml deleted file mode 100644 index 83aa5d563..000000000 --- a/skills/config/demo.yaml +++ /dev/null @@ -1,5 +0,0 @@ -living_room: - table: - location: - position: {x: 1.526343, y: 0.523707, z: 0.0024719} - orientation: { x: 0.0, y: 0.0, z: 0.746444684543, w: 0.665447468188 } \ No newline at end of file diff --git a/skills/config/motions.yaml b/skills/config/motions.yaml new file mode 100644 index 000000000..8d28c5b52 --- /dev/null +++ b/skills/config/motions.yaml @@ -0,0 +1,22 @@ +play_motion: + motions: + reach_arm: + joints: [arm_1_joint, arm_2_joint, arm_3_joint, arm_4_joint, arm_5_joint, arm_6_joint, arm_7_joint] + points: + - positions: [1.61, -0.93, -3.14, 1.83, -1.58, -0.53, 0.00] + time_from_start: 0.0 + open_gripper: + joints: [gripper_left_finger_joint, gripper_right_finger_joint] + points: + - positions: [0.04, 0.04] + time_from_start: 0.0 + pre_navigation: + joints: [torso_lift_joint] + points: + - positions: [0.15] + time_from_start: 0.0 + post_navigation: + joints: [torso_lift_joint, head_1_joint, head_2_joint] + points: + - positions: [0.25, 0.0, 0.0] + time_from_start: 0.0 \ No newline at end of file diff --git a/skills/package.xml b/skills/package.xml index be2d32977..053a2c85b 100644 --- a/skills/package.xml +++ b/skills/package.xml @@ -7,7 +7,7 @@ - Jared Swift + Jared Swift @@ -53,6 +53,7 @@ rospy rospy lasr_vision_msgs + lasr_vector_databases_msgs diff --git a/skills/src/lasr_skills/__init__.py b/skills/src/lasr_skills/__init__.py index b278ee554..abb498c39 100755 --- a/skills/src/lasr_skills/__init__.py +++ b/skills/src/lasr_skills/__init__.py @@ -1,13 +1,17 @@ -from .detect_objects import DetectObjects -from .detect_objects_3d import DetectObjects3D -from .detect_objects_in_area_3d import DetectObjectsInArea3D 
-from .detect_people import DetectPeople -from .detect_people_3d import DetectPeople3D -from .detect_people_in_area_3d import DetectPeopleInArea3D +from .detect import Detect +from .detect_3d import Detect3D +from .detect_3d_in_area import Detect3DInArea from .wait_for_person import WaitForPerson from .wait_for_person_in_area import WaitForPersonInArea from .describe_people import DescribePeople from .look_to_point import LookToPoint -from .look_to_point import LookToPoint +from .play_motion import PlayMotion from .go_to_location import GoToLocation -from .go_to_semantic_location import GoToSemanticLocation \ No newline at end of file +from .go_to_semantic_location import GoToSemanticLocation +from .say import Say +from .listen import Listen +from .listen_for import ListenFor +from .receive_object import ReceiveObject +from .handover_object import HandoverObject +from .ask_and_listen import AskAndListen +from .clip_vqa import QueryImage diff --git a/skills/src/lasr_skills/ask_and_listen.py b/skills/src/lasr_skills/ask_and_listen.py new file mode 100644 index 000000000..cd133437e --- /dev/null +++ b/skills/src/lasr_skills/ask_and_listen.py @@ -0,0 +1,97 @@ +import smach +from lasr_skills import Listen +from lasr_skills import Say + +from typing import Union + + +class AskAndListen(smach.StateMachine): + def __init__( + self, + tts_phrase: Union[str, None] = None, + tts_phrase_format_str: Union[str, None] = None, + ): + + if tts_phrase is not None: + smach.StateMachine.__init__( + self, + outcomes=["succeeded", "failed"], + output_keys=["transcribed_speech"], + ) + with self: + smach.StateMachine.add( + "SAY", + Say(text=tts_phrase), + transitions={ + "succeeded": "LISTEN", + "aborted": "failed", + "preempted": "failed", + }, + ) + smach.StateMachine.add( + "LISTEN", + Listen(), + transitions={ + "succeeded": "succeeded", + "aborted": "failed", + "preempted": "failed", + }, + remapping={"sequence": "transcribed_speech"}, + ) + elif tts_phrase_format_str is not None: + smach.StateMachine.__init__( + self, + outcomes=["succeeded", "failed"], + output_keys=["transcribed_speech"], + input_keys=["tts_phrase_placeholders"], + ) + with self: + smach.StateMachine.add( + "SAY", + Say(format_str=tts_phrase_format_str), + transitions={ + "succeeded": "LISTEN", + "aborted": "failed", + "preempted": "failed", + }, + remapping={"placeholders": "tts_phrase_placeholders"}, + ) + smach.StateMachine.add( + "LISTEN", + Listen(), + transitions={ + "succeeded": "succeeded", + "aborted": "failed", + "preempted": "failed", + }, + remapping={"sequence": "transcribed_speech"}, + ) + + else: + smach.StateMachine.__init__( + self, + outcomes=["succeeded", "failed"], + output_keys=["transcribed_speech"], + input_keys=["tts_phrase"], + ) + with self: + smach.StateMachine.add( + "SAY", + Say(), + transitions={ + "succeeded": "LISTEN", + "aborted": "failed", + "preempted": "failed", + }, + remapping={"text": "tts_phrase"}, + ) + smach.StateMachine.add( + "LISTEN", + Listen(), + transitions={ + "succeeded": "succeeded", + "aborted": "failed", + "preempted": "failed", + }, + remapping={"sequence": "transcribed_speech"}, + ) diff --git a/skills/src/lasr_skills/clip_vqa.py b/skills/src/lasr_skills/clip_vqa.py new file mode 100755 index 000000000..7126d10c0 --- /dev/null +++ b/skills/src/lasr_skills/clip_vqa.py @@ -0,0 +1,24 @@ +import smach_ros +from lasr_vision_msgs.srv import Vqa, VqaRequest + +from typing import List, Union + + +class QueryImage(smach_ros.ServiceState): + + def __init__(self, possible_answers: Union[None, 
List[str]] = None): + + if possible_answers is not None: + super(QueryImage, self).__init__( + "/clip_vqa/query_service", + Vqa, + request=VqaRequest(possible_answers=possible_answers), + response_slots=["answer", "similarity"], + ) + else: + super(QueryImage, self).__init__( + "/clip_vqa/query_service", + Vqa, + request_slots=["possible_answers"], + response_slots=["answer", "similarity"], + ) diff --git a/skills/src/lasr_skills/detect.py b/skills/src/lasr_skills/detect.py new file mode 100644 index 000000000..a647bd108 --- /dev/null +++ b/skills/src/lasr_skills/detect.py @@ -0,0 +1,46 @@ +#!/usr/bin/env python3 + +import rospy +import smach +from sensor_msgs.msg import Image + +from lasr_vision_msgs.srv import YoloDetection + +from typing import List, Union + + +class Detect(smach.State): + + def __init__( + self, + image_topic: str = "/xtion/rgb/image_raw", + model: str = "yolov8n.pt", + filter: Union[List[str], None] = None, + confidence: float = 0.5, + nms: float = 0.3, + ): + smach.State.__init__( + self, + outcomes=["succeeded", "failed"], + output_keys=["detections"], + ) + self.image_topic = image_topic + self.model = model + self.filter = filter if filter is not None else [] + self.confidence = confidence + self.nms = nms + self.yolo = rospy.ServiceProxy("/yolov8/detect", YoloDetection) + self.yolo.wait_for_service() + + def execute(self, userdata): + img_msg = rospy.wait_for_message(self.image_topic, Image) + try: + result = self.yolo(img_msg, self.model, self.confidence, self.nms) + result.detected_objects = [ + det for det in result.detected_objects if det.name in self.filter + ] + userdata.detections = result + return "succeeded" + except rospy.ServiceException as e: + rospy.logwarn(f"Unable to perform inference. ({str(e)})") + return "failed" diff --git a/skills/src/lasr_skills/detect_3d.py b/skills/src/lasr_skills/detect_3d.py new file mode 100644 index 000000000..0e8eba925 --- /dev/null +++ b/skills/src/lasr_skills/detect_3d.py @@ -0,0 +1,44 @@ +import rospy +import smach + +from sensor_msgs.msg import PointCloud2 +from lasr_vision_msgs.srv import YoloDetection3D + +from typing import List, Union + + +class Detect3D(smach.State): + + def __init__( + self, + depth_topic: str = "/xtion/depth_registered/points", + model: str = "yolov8n-seg.pt", + filter: Union[List[str], None] = None, + confidence: float = 0.5, + nms: float = 0.3, + ): + smach.State.__init__( + self, + outcomes=["succeeded", "failed"], + output_keys=["detections_3d"], + ) + self.depth_topic = depth_topic + self.model = model + self.filter = filter if filter is not None else [] + self.confidence = confidence + self.nms = nms + self.yolo = rospy.ServiceProxy("/yolov8/detect3d", YoloDetection3D) + self.yolo.wait_for_service() + + def execute(self, userdata): + pcl_msg = rospy.wait_for_message(self.depth_topic, PointCloud2) + try: + result = self.yolo(pcl_msg, self.model, self.confidence, self.nms) + result.detected_objects = [ + det for det in result.detected_objects if det.name in self.filter + ] + userdata.detections_3d = result + return "succeeded" + except rospy.ServiceException as e: + rospy.logwarn(f"Unable to perform inference. 
({str(e)})") + return "failed" diff --git a/skills/src/lasr_skills/detect_3d_in_area.py b/skills/src/lasr_skills/detect_3d_in_area.py new file mode 100644 index 000000000..8f3d32a37 --- /dev/null +++ b/skills/src/lasr_skills/detect_3d_in_area.py @@ -0,0 +1,72 @@ +import smach + +from lasr_skills import Detect3D + +from typing import List, Union + +from shapely.geometry import Point +from shapely.geometry.polygon import Polygon + + +class Detect3DInArea(smach.StateMachine): + + class FilterDetections(smach.State): + + def __init__(self, area_polygon: Polygon): + smach.State.__init__( + self, + outcomes=["succeeded", "failed"], + input_keys=["detections_3d"], + output_keys=["detections_3d"], + ) + self.area_polygon = area_polygon + + def execute(self, userdata): + detected_objects = userdata["detections_3d"].detected_objects + + satisfied_points = [ + self.area_polygon.contains(Point(object.point.x, object.point.y)) + for object in detected_objects + ] + filtered_detections = [ + detected_objects[i] + for i in range(0, len(detected_objects)) + if satisfied_points[i] + ] + + userdata["detections_3d"] = filtered_detections + return "succeeded" + + def __init__( + self, + area_polygon: Polygon, + depth_topic: str = "/xtion/depth_registered/points", + model: str = "yolov8n-seg.pt", + filter: Union[List[str], None] = None, + confidence: float = 0.5, + nms: float = 0.3, + ): + smach.StateMachine.__init__( + self, + outcomes=["succeeded", "failed"], + output_keys=["detections_3d"], + ) + + with self: + + smach.StateMachine.add( + "DETECT_OBJECTS_3D", + Detect3D( + depth_topic=depth_topic, + model=model, + filter=filter, + confidence=confidence, + nms=nms, + ), + transitions={"succeeded": "FILTER_DETECTIONS", "failed": "failed"}, + ) + smach.StateMachine.add( + "FILTER_DETECTIONS", + self.FilterDetections(area_polygon), + transitions={"succeeded": "succeeded", "failed": "failed"}, + ) diff --git a/skills/src/lasr_skills/detect_objects.py b/skills/src/lasr_skills/detect_objects.py deleted file mode 100644 index 195587f82..000000000 --- a/skills/src/lasr_skills/detect_objects.py +++ /dev/null @@ -1,24 +0,0 @@ -#!/usr/bin/env python3 - -import rospy -import smach -from sensor_msgs.msg import Image - -from lasr_vision_msgs.srv import YoloDetection - -class DetectObjects(smach.State): - - def __init__(self): - smach.State.__init__(self, outcomes=['succeeded', 'failed'], input_keys=['image_topic', 'img_msg', 'filter'], output_keys=['detections']) - self.yolo = rospy.ServiceProxy('/yolov8/detect', YoloDetection) - - def execute(self, userdata): - img_msg = rospy.wait_for_message(userdata.image_topic, Image) - try: - result = self.yolo(img_msg, "yolov8n.pt", 0.5, 0.3) - result.detected_objects = [det for det in result.detected_objects if det.name in userdata.filter] - userdata.detections = result - return 'succeeded' - except rospy.ServiceException as e: - rospy.logwarn(f"Unable to perform inference. 
({str(e)})") - return 'failed' \ No newline at end of file diff --git a/skills/src/lasr_skills/detect_objects_3d.py b/skills/src/lasr_skills/detect_objects_3d.py deleted file mode 100644 index 239098bf8..000000000 --- a/skills/src/lasr_skills/detect_objects_3d.py +++ /dev/null @@ -1,94 +0,0 @@ -#!/usr/bin/env python3 -import rospy -import smach - -from common_math import pcl_msg_to_cv2, seg_to_centroid -from cv_bridge3 import CvBridge -from tf_module.srv import TfTransform, TfTransformRequest -from std_msgs.msg import String -from geometry_msgs.msg import PointStamped, Point -from sensor_msgs.msg import PointCloud2 -from geometry_msgs.msg import PointStamped -from visualization_msgs.msg import Marker -import numpy as np - -from lasr_vision_msgs.srv import YoloDetection - - - -class DetectObjects3D(smach.State): - - def __init__(self): - smach.State.__init__(self, outcomes=['succeeded', 'failed'], input_keys=['depth_topic', 'filter'], output_keys=['detections_3d']) - self.yolo = rospy.ServiceProxy('/yolov8/detect', YoloDetection) - self.tf = rospy.ServiceProxy("/tf_transform", TfTransform) - self.bridge = CvBridge() - self.n_published_markers = 0 - self.marker_point_pub = rospy.Publisher("/detect_objects_3d/points", Marker) - self.marker_text_pub = rospy.Publisher("/detect_objects_3d/labels", Marker) - - def estimate_pose(self, pcl_msg, detection): - centroid_xyz = seg_to_centroid(pcl_msg, np.array(detection.xyseg)) - centroid = PointStamped() - centroid.point = Point(*centroid_xyz) - centroid.header = pcl_msg.header - tf_req = TfTransformRequest() - tf_req.target_frame = String("map") - tf_req.point = centroid - response = self.tf(tf_req) - return np.array([response.target_point.point.x, response.target_point.point.y, response.target_point.point.z]) - - def execute(self, userdata): - pcl_msg = rospy.wait_for_message("/xtion/depth_registered/points", PointCloud2) - try: - cv_im = pcl_msg_to_cv2(pcl_msg) - img_msg = self.bridge.cv2_to_imgmsg(cv_im) - result = self.yolo(img_msg, "yolov8n-seg.pt", 0.5, 0.3) - result.detected_objects = [det for det in result.detected_objects if det.name in userdata.filter] - result = [(detection, self.estimate_pose(pcl_msg, detection)) for detection in result.detected_objects] - for detection, point in result: - marker_point_msg = Marker() - marker_point_msg.header.frame_id = "map" - marker_point_msg.header.stamp = rospy.Time.now() - marker_point_msg.id = self.n_published_markers - marker_point_msg.type = Marker.SPHERE - marker_point_msg.action = Marker.ADD - marker_point_msg.pose.position.x = point[0] - marker_point_msg.pose.position.y = point[1] - marker_point_msg.pose.position.z = point[2] - marker_point_msg.color.r = 1.0 - marker_point_msg.color.g = 0.0 - marker_point_msg.color.b = 0.0 - marker_point_msg.scale.x = 0.1 - marker_point_msg.scale.y = 0.1 - marker_point_msg.scale.z = 0.1 - marker_point_msg.color.a = 1.0 - self.marker_point_pub.publish(marker_point_msg) - - marker_text_msg = Marker() - marker_text_msg.header.frame_id = "map" - marker_text_msg.header.stamp = rospy.Time.now() - marker_text_msg.id = self.n_published_markers - marker_text_msg.type = Marker.TEXT_VIEW_FACING - marker_text_msg.action = Marker.ADD - marker_text_msg.pose.position.x = point[0] - marker_text_msg.pose.position.y = point[1] - marker_text_msg.pose.position.z = point[2] + 0.15 - marker_text_msg.text = detection.name - marker_text_msg.color.r = 1.0 - marker_text_msg.color.g = 0.0 - marker_text_msg.color.b = 0.0 - marker_text_msg.scale.x = 0.1 - marker_text_msg.scale.y = 0.1 - 
marker_text_msg.scale.z = 0.1 - marker_text_msg.color.a = 1.0 - self.marker_text_pub.publish(marker_text_msg) - - - self.n_published_markers += 1 - - userdata.detections_3d = result - return 'succeeded' - except rospy.ServiceException as e: - rospy.logwarn(f"Unable to perform inference. ({str(e)})") - return 'failed' \ No newline at end of file diff --git a/skills/src/lasr_skills/detect_objects_in_area_3d.py b/skills/src/lasr_skills/detect_objects_in_area_3d.py deleted file mode 100644 index 009496741..000000000 --- a/skills/src/lasr_skills/detect_objects_in_area_3d.py +++ /dev/null @@ -1,30 +0,0 @@ -#!/usr/bin/env python3 - -import smach - -from lasr_skills import DetectObjects3D -from lasr_shapely import LasrShapely - - -class DetectObjectsInArea3D(smach.StateMachine): - - class FilterDetections(smach.State): - - def __init__(self): - smach.State.__init__(self, outcomes=['succeeded', 'failed'], input_keys=['area_polygon','detections_3d'], output_keys=['detections_3d']) - self.shapely = LasrShapely() - - def execute(self, userdata): - satisfied_points = self.shapely.are_points_in_polygon_2d(userdata.area_polygon, [[pose[0], pose[1]] for (_, pose) in userdata.detections_3d]).inside - filtered_detections = [userdata.detections_3d[i] for i in range(0, len(userdata.detections_3d)) if satisfied_points[i]] - userdata.detections_3d = filtered_detections - - return 'succeeded' - - def __init__(self): - smach.StateMachine.__init__(self, outcomes=['succeeded', 'failed'], input_keys=['pcl_msg', 'area_polygon', 'filter'], output_keys=['detections_3d']) - - with self: - - smach.StateMachine.add('DETECT_OBJECTS_3D', DetectObjects3D(), transitions={'succeeded': 'FILTER_DETECTIONS', 'failed' : 'failed'}) - smach.StateMachine.add('FILTER_DETECTIONS', self.FilterDetections(), transitions={'succeeded' : 'succeeded', 'failed' : 'failed'}) \ No newline at end of file diff --git a/skills/src/lasr_skills/detect_people.py b/skills/src/lasr_skills/detect_people.py deleted file mode 100755 index 7c46dfc7b..000000000 --- a/skills/src/lasr_skills/detect_people.py +++ /dev/null @@ -1,9 +0,0 @@ -#!/usr/bin/env python3 - -from lasr_skills import DetectObjects - -class DetectPeople(DetectObjects): - - def __init__(self): - super().__init__() - self.userdata.filter = ["person"] \ No newline at end of file diff --git a/skills/src/lasr_skills/detect_people_3d.py b/skills/src/lasr_skills/detect_people_3d.py deleted file mode 100755 index 2fe1d383d..000000000 --- a/skills/src/lasr_skills/detect_people_3d.py +++ /dev/null @@ -1,9 +0,0 @@ -#!/usr/bin/env python3 - -from lasr_skills import DetectObjects3D - -class DetectPeople3D(DetectObjects3D): - - def __init__(self): - super().__init__() - self.userdata.filter = ["person"] \ No newline at end of file diff --git a/skills/src/lasr_skills/detect_people_in_area_3d.py b/skills/src/lasr_skills/detect_people_in_area_3d.py deleted file mode 100755 index 05c3089c1..000000000 --- a/skills/src/lasr_skills/detect_people_in_area_3d.py +++ /dev/null @@ -1,9 +0,0 @@ -#!/usr/bin/env python3 - -from lasr_skills import DetectObjectsInArea3D - -class DetectPeopleInArea3D(DetectObjectsInArea3D): - - def __init__(self): - super().__init__() - self.userdata.filter = ["person"] \ No newline at end of file diff --git a/skills/src/lasr_skills/detect_pointing.py b/skills/src/lasr_skills/detect_pointing.py new file mode 100644 index 000000000..95f2ebe6c --- /dev/null +++ b/skills/src/lasr_skills/detect_pointing.py @@ -0,0 +1,17 @@ +import rospy +import smach +from sensor_msgs.msg import Image + +from 
lasr_vision_msgs.srv import PointingDirection + +class PointingDetector(smach.State): + def __init__(self): + smach.State.__init__(self, outcomes=['succeeded', 'failed'], output_keys=['direction']) + self.service = rospy.ServiceProxy('/pointing_detection_service', PointingDirection) + self.image_raw = rospy.wait_for_message('/xtion/rgb/image_raw', Image) + + def execute(self, userdata): + resp = self.service(self.image_raw) + userdata.direction = resp.direction + + return 'succeeded' if resp.direction != 'Err' else 'failed' \ No newline at end of file diff --git a/skills/src/lasr_skills/go_to_location.py b/skills/src/lasr_skills/go_to_location.py index 00e857e15..562aeaf61 100755 --- a/skills/src/lasr_skills/go_to_location.py +++ b/skills/src/lasr_skills/go_to_location.py @@ -1,31 +1,144 @@ -#!/usr/bin/env python3 - -import rospy +import smach_ros import smach -from tiago_controllers.controllers import Controllers -from geometry_msgs.msg import Point, Quaternion, Pose - -class GoToLocation(smach.State): - - def __init__(self): - smach.State.__init__(self, outcomes=['succeeded', 'failed'], input_keys=['location']) - self.controllers = Controllers() - - def execute(self, userdata): - try: - status = self.controllers.base_controller.sync_to_pose(userdata.location) - if status: - return 'succeeded' - return 'failed' - except rospy.ERROR as e: - rospy.logwarn(f"Unable to go to location. {userdata.location} -> ({str(e)})") - return 'failed' - -if __name__ == '__main__': - rospy.init_node('go_to_location') - sm = smach.StateMachine(outcomes=['succeeded', 'failed']) - loc = rospy.get_param('/living_room/table/location') - sm.userdata.location = Pose(position=Point(**loc['position']), orientation=Quaternion(**loc['orientation'])) - with sm: - smach.StateMachine.add('GoToLocation', GoToLocation(), transitions={'succeeded': 'succeeded', 'failed': 'failed'}) - sm.execute() \ No newline at end of file +import rospy +import rosservice +from move_base_msgs.msg import MoveBaseAction, MoveBaseGoal +from geometry_msgs.msg import Pose, PoseStamped +from std_msgs.msg import Header + +from typing import Union + +from lasr_skills import PlayMotion + +PUBLIC_CONTAINER = False + +try: + from pal_startup_msgs.srv import ( + StartupStart, + StartupStop, + StartupStartRequest, + StartupStopRequest, + ) +except ModuleNotFoundError: + PUBLIC_CONTAINER = True + + +class GoToLocation(smach.StateMachine): + + def __init__(self, location: Union[Pose, None] = None): + super(GoToLocation, self).__init__(outcomes=["succeeded", "failed"]) + + IS_SIMULATION = ( + "/pal_startup_control/start" not in rosservice.get_service_list() + ) + + with self: + smach.StateMachine.add( + "LOWER_BASE", + PlayMotion("pre_navigation"), + transitions={ + "succeeded": ( + "ENABLE_HEAD_MANAGER" if not IS_SIMULATION else "GO_TO_LOCATION" + ), + "aborted": "failed", + "preempted": "failed", + }, + ) + + if not IS_SIMULATION: + + if PUBLIC_CONTAINER: + rospy.logwarn( + "You are using a public container. The head manager will not be stopped during navigation." 
+ ) + else: + smach.StateMachine.add( + "ENABLE_HEAD_MANAGER", + smach_ros.ServiceState( + "/pal_startup_control/start", + StartupStart, + request=StartupStartRequest("head_manager", ""), + ), + transitions={ + "succeeded": "GO_TO_LOCATION", + "preempted": "failed", + "aborted": "failed", + }, + ) + + if location is not None: + smach.StateMachine.add( + "GO_TO_LOCATION", + smach_ros.SimpleActionState( + "move_base", + MoveBaseAction, + goal=MoveBaseGoal( + target_pose=PoseStamped( + pose=location, header=Header(frame_id="map") + ) + ), + ), + transitions={ + "succeeded": ( + "DISABLE_HEAD_MANAGER" + if not IS_SIMULATION + else "RAISE_BASE" + ), + "aborted": "failed", + "preempted": "failed", + }, + ) + else: + smach.StateMachine.add( + "GO_TO_LOCATION", + smach_ros.SimpleActionState( + "move_base", + MoveBaseAction, + goal_cb=lambda ud, _: MoveBaseGoal( + target_pose=PoseStamped( + pose=ud.location, header=Header(frame_id="map") + ) + ), + input_keys=["location"], + ), + transitions={ + "succeeded": ( + "DISABLE_HEAD_MANAGER" + if not IS_SIMULATION + else "RAISE_BASE" + ), + "aborted": "failed", + "preempted": "failed", + }, + ) + + if not IS_SIMULATION: + + if PUBLIC_CONTAINER: + rospy.logwarn( + "You are using a public container. The head manager will not be start following navigation." + ) + else: + smach.StateMachine.add( + "DISABLE_HEAD_MANAGER", + smach_ros.ServiceState( + "/pal_startup_control/stop", + StartupStop, + request=StartupStopRequest("head_manager"), + ), + transitions={ + "succeeded": "RAISE_BASE", + "aborted": "failed", + "preempted": "failed", + }, + ) + + smach.StateMachine.add( + "RAISE_BASE", + PlayMotion("post_navigation"), + transitions={ + "succeeded": "succeeded", + "aborted": "failed", + "preempted": "failed", + }, + ) diff --git a/skills/src/lasr_skills/go_to_semantic_location.py b/skills/src/lasr_skills/go_to_semantic_location.py index 5f1a59a50..007d801ca 100755 --- a/skills/src/lasr_skills/go_to_semantic_location.py +++ b/skills/src/lasr_skills/go_to_semantic_location.py @@ -1,30 +1,28 @@ -#!/usr/bin/env python3 - import rospy -import smach -from tiago_controllers.controllers import Controllers -from geometry_msgs.msg import Point, Quaternion, Pose +import smach_ros + +from move_base_msgs.msg import MoveBaseAction, MoveBaseGoal +from geometry_msgs.msg import PoseStamped, Pose, Point, Quaternion +from std_msgs.msg import Header + -class GoToSemanticLocation(smach.State): +class GoToSemanticLocation(smach_ros.SimpleActionState): def __init__(self): - smach.State.__init__(self, outcomes=['succeeded', 'failed'], input_keys=['location']) - self.controllers = Controllers() + super(GoToSemanticLocation, self).__init__( + "move_base", + MoveBaseAction, + goal_cb=lambda ud, _: MoveBaseGoal( + target_pose=PoseStamped( + pose=self._to_pose(rospy.get_param(f"{ud.location}/location")), + header=Header(frame_id="map"), + ) + ), + input_keys=["location"], + ) - def execute(self, userdata): - loc = rospy.get_param(f"{userdata.location}/location") - try: - status = self.controllers.base_controller.sync_to_pose(Pose(position=Point(**loc['position']), orientation=Quaternion(**loc['orientation']))) - if status: - return 'succeeded' - return 'failed' - except rospy.ERROR as e: - rospy.logwarn(f"Unable to go to location. 
{loc} -> ({str(e)})") - return 'failed' -if __name__ == '__main__': - rospy.init_node('go_to_semantic_location') - sm = smach.StateMachine(outcomes=['succeeded', 'failed']) - sm.userdata.location = '/living_room/table' - with sm: - smach.StateMachine.add('GoToSemanticLocation', GoToSemanticLocation(), transitions={'succeeded': 'succeeded', 'failed': 'failed'}) - sm.execute() + def _to_pose(self, location): + return Pose( + position=Point(**location["position"]), + orientation=Quaternion(**location["orientation"]), + ) diff --git a/skills/src/lasr_skills/handover_object.py b/skills/src/lasr_skills/handover_object.py new file mode 100755 index 000000000..9104e2e8f --- /dev/null +++ b/skills/src/lasr_skills/handover_object.py @@ -0,0 +1,97 @@ +#!/usr/bin/env python3 +import smach +import smach_ros + +from std_srvs.srv import Empty + +from lasr_skills import Say, ListenFor, PlayMotion + +import rospkg +import rosparam +import os + + +class HandoverObject(smach.StateMachine): + + def __init__(self): + smach.StateMachine.__init__( + self, outcomes=["succeeded", "failed"], input_keys=["object_name"] + ) + + r = rospkg.RosPack() + els = rosparam.load_file( + os.path.join(r.get_path("lasr_skills"), "config", "motions.yaml") + ) + for param, ns in els: + rosparam.upload_params(ns, param) + + with self: + smach.StateMachine.add( + "CLEAR_OCTOMAP", + smach_ros.ServiceState("clear_octomap", Empty), + transitions={ + "succeeded": "REACH_ARM", + "aborted": "failed", + "preempted": "failed", + }, + ) + smach.StateMachine.add( + "REACH_ARM", + PlayMotion(motion_name="reach_arm"), + transitions={ + "succeeded": "SAY_TAKE", + "aborted": "failed", + "preempted": "failed", + }, + ) + smach.StateMachine.add( + "SAY_TAKE", + Say( + format_str="Please grab the {} from my end-effector, and say `I am done`.", + ), + transitions={ + "succeeded": "LISTEN_DONE", + "aborted": "failed", + "preempted": "failed", + }, + remapping={"placeholders": "object_name"}, + ) + smach.StateMachine.add( + "LISTEN_DONE", + ListenFor("done"), + transitions={ + "succeeded": "OPEN_GRIPPER", + "not_done": "LISTEN_DONE", + "aborted": "failed", + "preempted": "failed", + }, + ) + smach.StateMachine.add( + "OPEN_GRIPPER", + PlayMotion(motion_name="open_gripper"), + transitions={ + "succeeded": "FOLD_ARM", + "aborted": "failed", + "preempted": "failed", + }, + ) + smach.StateMachine.add( + "FOLD_ARM", + PlayMotion(motion_name="home"), + transitions={ + "succeeded": "succeeded", + "aborted": "failed", + "preempted": "failed", + }, + ) + + +if __name__ == "__main__": + import rospy + + rospy.init_node("handover_object") + sm = HandoverObject() + sm.userdata.object_name = "cola" + outcome = sm.execute() + rospy.loginfo("Outcome: " + outcome) + rospy.signal_shutdown("Done") diff --git a/skills/src/lasr_skills/listen.py b/skills/src/lasr_skills/listen.py new file mode 100644 index 000000000..272f24b47 --- /dev/null +++ b/skills/src/lasr_skills/listen.py @@ -0,0 +1,15 @@ +#!/usr/bin/env python3 +import smach_ros +from lasr_speech_recognition_msgs.msg import ( + TranscribeSpeechAction, +) + + +class Listen(smach_ros.SimpleActionState): + def __init__(self): + smach_ros.SimpleActionState.__init__( + self, + "transcribe_speech", + TranscribeSpeechAction, + result_slots=["sequence"], + ) diff --git a/skills/src/lasr_skills/listen_for.py b/skills/src/lasr_skills/listen_for.py new file mode 100644 index 000000000..52df8b5ef --- /dev/null +++ b/skills/src/lasr_skills/listen_for.py @@ -0,0 +1,26 @@ +import smach_ros +from lasr_speech_recognition_msgs.msg 
import ( + TranscribeSpeechAction, + TranscribeSpeechGoal, +) + +from actionlib_msgs.msg import GoalStatus + + +class ListenFor(smach_ros.SimpleActionState): + def __init__(self, wake_word: str): + + def speech_result_cb(userdata, status, result): + if status == GoalStatus.SUCCEEDED: + if wake_word in result.sequence.lower(): + return "succeeded" + return "not_done" + return "aborted" + + smach_ros.SimpleActionState.__init__( + self, + "transcribe_speech", + TranscribeSpeechAction, + result_cb=speech_result_cb, + outcomes=["succeeded", "preempted", "aborted", "not_done"], + ) diff --git a/skills/src/lasr_skills/look_to_point.py b/skills/src/lasr_skills/look_to_point.py old mode 100644 new mode 100755 index 85790a2ed..81c633ad7 --- a/skills/src/lasr_skills/look_to_point.py +++ b/skills/src/lasr_skills/look_to_point.py @@ -1,20 +1,22 @@ -import smach -import actionlib -from control_msgs.msg import PointHeadGoal -from control_msgs.msg import PointHeadAction -from geometry_msgs.msg import Point +import smach_ros +from control_msgs.msg import PointHeadGoal, PointHeadAction +from geometry_msgs.msg import Point, PointStamped +from std_msgs.msg import Header -class LookToPoint(smach.State): - def __init__(self): - smach.State.__init__(self, outcomes=['succeeded'], input_keys=['point']) - self.point_head_client = actionlib.SimpleActionClient('/head_controller/point_head_action', PointHeadAction) - def execute(self, userdata): - ph_goal = PointHeadGoal() - ph_goal.max_velocity = 1.0 - ph_goal.pointing_frame = 'head_2_link' - ph_goal.pointing_axis = Point(1.0, 0.0, 0.0) - ph_goal.target.header.frame_id = 'map' - ph_goal.target.point = userdata.point - self.point_head_client.send_goal_and_wait(ph_goal) - return 'succeeded' \ No newline at end of file +class LookToPoint(smach_ros.SimpleActionState): + def __init__(self): + super(LookToPoint, self).__init__( + "/head_controller/point_head_action", + PointHeadAction, + goal_cb=lambda ud, _: PointHeadGoal( + pointing_frame="head_2_link", + pointing_axis=Point(1.0, 0.0, 0.0), + max_velocity=1.0, + target=PointStamped( + header=Header(frame_id="map"), + point=ud.point, + ), + ), + input_keys=["point"], + ) diff --git a/skills/src/lasr_skills/play_motion.py b/skills/src/lasr_skills/play_motion.py new file mode 100644 index 000000000..a24e480d2 --- /dev/null +++ b/skills/src/lasr_skills/play_motion.py @@ -0,0 +1,45 @@ +import smach_ros +import rospy + +from play_motion_msgs.msg import PlayMotionAction, PlayMotionGoal + +from typing import Union, List + + +class PlayMotion(smach_ros.SimpleActionState): + + def _needs_planning(self, motion_name: str) -> bool: + joints: List[str] = rospy.get_param( + f"/play_motion/motions/{motion_name}/joints" + ) + needs_planning: bool = any( + "arm" in joint or "gripper" in joint for joint in joints + ) + + print(f"Motion {motion_name} needs planning: {needs_planning}") + + return needs_planning + + def __init__(self, motion_name: Union[str, None] = None): + # TODO: the play motion action server is always returning 'aborted', figure out what's going on + if motion_name is not None: + super(PlayMotion, self).__init__( + "play_motion", + PlayMotionAction, + goal=PlayMotionGoal( + motion_name=motion_name, + skip_planning=not self._needs_planning(motion_name), + ), + result_cb=lambda _, __, ___: "succeeded", + ) + else: + super(PlayMotion, self).__init__( + "play_motion", + PlayMotionAction, + goal_cb=lambda ud, _: PlayMotionGoal( + motion_name=ud.motion_name, + skip_planning=not self._needs_planning(ud.motion_name), + ), + 
input_keys=["motion_name"], + result_cb=lambda _, __, ___: "succeeded", + ) diff --git a/skills/src/lasr_skills/receive_object.py b/skills/src/lasr_skills/receive_object.py new file mode 100755 index 000000000..45864e61b --- /dev/null +++ b/skills/src/lasr_skills/receive_object.py @@ -0,0 +1,106 @@ +#!/usr/bin/env python3 +import smach +import smach_ros + +from std_srvs.srv import Empty + +from lasr_skills import Say, ListenFor, PlayMotion + +import rospkg +import rosparam +import os + + +class ReceiveObject(smach.StateMachine): + + def __init__(self): + smach.StateMachine.__init__( + self, outcomes=["succeeded", "failed"], input_keys=["object_name"] + ) + + r = rospkg.RosPack() + els = rosparam.load_file( + os.path.join(r.get_path("lasr_skills"), "config", "motions.yaml") + ) + for param, ns in els: + rosparam.upload_params(ns, param) + + with self: + smach.StateMachine.add( + "CLEAR_OCTOMAP", + smach_ros.ServiceState("clear_octomap", Empty), + transitions={ + "succeeded": "REACH_ARM", + "aborted": "failed", + "preempted": "failed", + }, + ) + smach.StateMachine.add( + "REACH_ARM", + PlayMotion(motion_name="reach_arm"), + transitions={ + "succeeded": "OPEN_GRIPPER", + "aborted": "failed", + "preempted": "failed", + }, + ) + smach.StateMachine.add( + "OPEN_GRIPPER", + PlayMotion(motion_name="open_gripper"), + transitions={ + "succeeded": "SAY_PLACE", + "aborted": "failed", + "preempted": "failed", + }, + ) + smach.StateMachine.add( + "SAY_PLACE", + Say( + format_str="Please place the {} in my end-effector, and say `I am done`.", + ), + transitions={ + "succeeded": "LISTEN_DONE", + "aborted": "failed", + "preempted": "failed", + }, + remapping={"placeholders": "object_name"}, + ) + smach.StateMachine.add( + "LISTEN_DONE", + ListenFor("done"), + transitions={ + "succeeded": "CLOSE_GRIPPER", + "not_done": "LISTEN_DONE", + "aborted": "failed", + "preempted": "failed", + }, + ) + smach.StateMachine.add( + "CLOSE_GRIPPER", + smach_ros.ServiceState("parallel_gripper_controller/grasp", Empty), + transitions={ + "succeeded": "FOLD_ARM", + "aborted": "failed", + "preempted": "failed", + }, + ) + smach.StateMachine.add( + "FOLD_ARM", + PlayMotion(motion_name="home"), + transitions={ + "succeeded": "succeeded", + "aborted": "failed", + "preempted": "failed", + }, + ) + + +if __name__ == "__main__": + import rospy + + rospy.init_node("receive_object") + sm = ReceiveObject() + sm.userdata.object_name = "cola" + outcome = sm.execute() + rospy.loginfo("Outcome: " + outcome) + rospy.signal_shutdown("Done") diff --git a/skills/src/lasr_skills/say.py b/skills/src/lasr_skills/say.py new file mode 100644 index 000000000..5e0bddb8d --- /dev/null +++ b/skills/src/lasr_skills/say.py @@ -0,0 +1,45 @@ +import smach_ros + +from pal_interaction_msgs.msg import TtsGoal, TtsAction, TtsText + +from typing import Union + + +class Say(smach_ros.SimpleActionState): + def __init__( + self, text: Union[str, None] = None, format_str: Union[str, None] = None + ): + if text is not None: + super(Say, self).__init__( + "tts", + TtsAction, + goal=TtsGoal(rawtext=TtsText(text=text, lang_id="en_GB")), + ) + elif format_str is not None: + super(Say, self).__init__( + "tts", + TtsAction, + goal_cb=lambda ud, _: ( + TtsGoal( + rawtext=TtsText( + text=format_str.format(*ud.placeholders), lang_id="en_GB" + ) + ) + if isinstance(ud.placeholders, (list, tuple)) + else TtsGoal( + rawtext=TtsText( + text=format_str.format(ud.placeholders), lang_id="en_GB" + ) + ) + ), + input_keys=["placeholders"], + ) + else: + super(Say, self).__init__( + 
"tts", + TtsAction, + goal_cb=lambda ud, _: TtsGoal( + rawtext=TtsText(text=ud.text, lang_id="en_GB") + ), + input_keys=["text"], + ) diff --git a/skills/src/lasr_skills/wait_for_person.py b/skills/src/lasr_skills/wait_for_person.py index 68aacc469..ada04b8a2 100755 --- a/skills/src/lasr_skills/wait_for_person.py +++ b/skills/src/lasr_skills/wait_for_person.py @@ -1,26 +1,39 @@ -#!/usr/bin/env python3 - import smach -from lasr_skills import DetectPeople +from lasr_skills import Detect + class WaitForPerson(smach.StateMachine): class CheckForPerson(smach.State): def __init__(self): - smach.State.__init__(self, outcomes=['done', 'not_done'], input_keys=['detections']) + smach.State.__init__( + self, outcomes=["done", "not_done"], input_keys=["detections"] + ) def execute(self, userdata): if len(userdata.detections.detected_objects): - return 'done' + return "done" else: - return 'not_done' - + return "not_done" + def __init__(self): - smach.StateMachine.__init__(self, outcomes=['succeeded', 'failed'], input_keys=['depth_topic'], output_keys=['detections']) + smach.StateMachine.__init__( + self, + outcomes=["succeeded", "failed"], + output_keys=["detections"], + ) with self: - smach.StateMachine.add('DETECT_PEOPLE', DetectPeople(), transitions={'succeeded' : 'CHECK_FOR_PERSON', 'failed' : 'failed'}) - smach.StateMachine.add('CHECK_FOR_PERSON', self.CheckForPerson(), transitions={'done' : 'succeeded', 'not_done' : 'DETECT_PEOPLE'}) \ No newline at end of file + smach.StateMachine.add( + "DETECT_PEOPLE", + Detect(filter=["person"]), + transitions={"succeeded": "CHECK_FOR_PERSON", "failed": "failed"}, + ) + smach.StateMachine.add( + "CHECK_FOR_PERSON", + self.CheckForPerson(), + transitions={"done": "succeeded", "not_done": "DETECT_PEOPLE"}, + ) diff --git a/skills/src/lasr_skills/wait_for_person_in_area.py b/skills/src/lasr_skills/wait_for_person_in_area.py index 1ce3bcd98..a203e98b0 100755 --- a/skills/src/lasr_skills/wait_for_person_in_area.py +++ b/skills/src/lasr_skills/wait_for_person_in_area.py @@ -1,25 +1,40 @@ -#!/usr/bin/env python3 - import smach -from lasr_skills import DetectPeopleInArea3D +from lasr_skills import Detect3DInArea + +from shapely.geometry.polygon import Polygon + class WaitForPersonInArea(smach.StateMachine): class CheckForPerson(smach.State): def __init__(self): - smach.State.__init__(self, outcomes=['done', 'not_done'], input_keys=['detections_3d']) + smach.State.__init__( + self, outcomes=["done", "not_done"], input_keys=["detections_3d"] + ) def execute(self, userdata): if len(userdata.detections_3d): - return 'done' + return "done" else: - return 'not_done' - - def __init__(self): - smach.StateMachine.__init__(self, outcomes=['succeeded', 'failed'], input_keys=['depth_topic', 'area_polygon'], output_keys=['detections_3d']) + return "not_done" + + def __init__(self, area_polygon: Polygon): + smach.StateMachine.__init__( + self, + outcomes=["succeeded", "failed"], + output_keys=["detections_3d"], + ) with self: - smach.StateMachine.add('DETECT_PEOPLE_3D', DetectPeopleInArea3D(), transitions={'succeeded' : 'CHECK_FOR_PERSON', 'failed' : 'failed'}) - smach.StateMachine.add('CHECK_FOR_PERSON', self.CheckForPerson(), transitions={'done' : 'succeeded', 'not_done' : 'DETECT_PEOPLE_3D'}) \ No newline at end of file + smach.StateMachine.add( + "DETECT_PEOPLE_3D", + Detect3DInArea(area_polygon=area_polygon, filter=["person"]), + transitions={"succeeded": "CHECK_FOR_PERSON", "failed": "failed"}, + ) + smach.StateMachine.add( + "CHECK_FOR_PERSON", + self.CheckForPerson(), + 
transitions={"done": "succeeded", "not_done": "DETECT_PEOPLE_3D"}, + ) diff --git a/skills/src/lasr_skills/xml_question_answer.py b/skills/src/lasr_skills/xml_question_answer.py new file mode 100644 index 000000000..abede91b5 --- /dev/null +++ b/skills/src/lasr_skills/xml_question_answer.py @@ -0,0 +1,70 @@ +#!/usr/bin/env python3 + +import rospy +import smach +import xml.etree.ElementTree as ET +from lasr_voice import Voice + +from lasr_vector_databases_msgs.srv import TxtQuery, TxtQueryRequest + + +def parse_question_xml(xml_file_path: str) -> dict: + """Parses the GPSR Q/A xml file and returns a dictionary + consisting of two lists, one for questions and one for answers, + where the index of each question corresponds to the index of its + corresponding answer. + + Args: + xml_file_path (str): full path to xml file to parse + + Returns: + dict: dictionary with keys "questions" and "answers" + each of which is a list of strings. + """ + tree = ET.parse(xml_file_path) + root = tree.getroot() + parsed_questions = [] + parsed_answers = [] + for q_a in root: + question = q_a.find("q").text + answer = q_a.find("a").text + parsed_questions.append(question) + parsed_answers.append(answer) + + return {"questions": parsed_questions, "answers": parsed_answers} + + +class XmlQuestionAnswer(smach.State): + + def __init__(self, index_path: str, txt_path: str, xml_path: str, k: int = 1): + smach.State.__init__( + self, + outcomes=["succeeded", "failed"], + input_keys=["query_sentence", "k"], + output_keys=["closest_answers"], + ) + self.index_path = index_path + self.txt_path = txt_path + self.xml_path = xml_path + self.k = k + self.txt_query = rospy.ServiceProxy("/lasr_faiss/txt_query", TxtQuery) + + def execute(self, userdata): + rospy.wait_for_service("/lasr_faiss/txt_query") + q_a_dict: dict = parse_question_xml(self.xml_path) + try: + request = TxtQueryRequest( + self.txt_path, + self.index_path, + userdata.query_sentence, + self.k, + ) + result = self.txt_query(request) + answers = [ + q_a_dict["answers"][q_a_dict["questions"].index(q)] + for q in result.closest_sentences + ] + userdata.closest_answers = answers + return "succeeded" + except rospy.ServiceException as e: + return "failed" diff --git a/legacy/listen_module/src/__init__.py b/tasks/coffee_shop/CATKIN_IGNORE similarity index 100% rename from legacy/listen_module/src/__init__.py rename to tasks/coffee_shop/CATKIN_IGNORE diff --git a/tasks/coffee_shop/package.xml b/tasks/coffee_shop/package.xml index 6b054e0d9..28038923d 100644 --- a/tasks/coffee_shop/package.xml +++ b/tasks/coffee_shop/package.xml @@ -7,7 +7,7 @@ - Jared Swift + Jared Swift Peter Tisnikar Paul Makles diff --git a/legacy/people_detection/create_dataset/CMakeLists.txt b/tasks/gpsr/CMakeLists.txt similarity index 92% rename from legacy/people_detection/create_dataset/CMakeLists.txt rename to tasks/gpsr/CMakeLists.txt index 3689a14ab..af1f6c908 100644 --- a/legacy/people_detection/create_dataset/CMakeLists.txt +++ b/tasks/gpsr/CMakeLists.txt @@ -1,5 +1,5 @@ cmake_minimum_required(VERSION 3.0.2) -project(create_dataset) +project(gpsr) ## Compile as C++11, supported in ROS Kinetic and newer # add_compile_options(-std=c++11) @@ -7,10 +7,7 @@ project(create_dataset) ## Find catkin macros and libraries ## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz) ## is used, also find other catkin packages -find_package(catkin REQUIRED COMPONENTS - rospy - std_msgs -) +find_package(catkin REQUIRED) ## System dependencies are found with CMake's conventions # 
find_package(Boost REQUIRED COMPONENTS system) @@ -54,9 +51,7 @@ catkin_python_setup() ## Generate services in the 'srv' folder # add_service_files( -# FILES -# Service1.srv -# Service2.srv +# FILES # ) ## Generate actions in the 'action' folder @@ -103,8 +98,8 @@ catkin_python_setup() ## DEPENDS: system dependencies of this project that dependent projects also need catkin_package( # INCLUDE_DIRS include -# LIBRARIES create_dataset -# CATKIN_DEPENDS rospy std_msgs +# LIBRARIES coffee_shop +# CATKIN_DEPENDS other_catkin_pkg # DEPENDS system_lib ) @@ -116,12 +111,12 @@ catkin_package( ## Your package locations should be listed before other locations include_directories( # include - ${catkin_INCLUDE_DIRS} +# ${catkin_INCLUDE_DIRS} ) ## Declare a C++ library # add_library(${PROJECT_NAME} -# src/${PROJECT_NAME}/create_dataset.cpp +# src/${PROJECT_NAME}/coffee_shop.cpp # ) ## Add cmake target dependencies of the library @@ -132,7 +127,7 @@ include_directories( ## Declare a C++ executable ## With catkin_make all packages are built within a single CMake context ## The recommended prefix ensures that target names across packages don't collide -# add_executable(${PROJECT_NAME}_node src/create_dataset_node.cpp) +# add_executable(${PROJECT_NAME}_node src/coffee_shop_node.cpp) ## Rename C++ executable without prefix ## The above recommended prefix causes long target names, the following renames the @@ -158,10 +153,12 @@ include_directories( ## Mark executable scripts (Python etc.) for installation ## in contrast to setup.py, you can choose the destination -# catkin_install_python(PROGRAMS -# scripts/my_python_script -# DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} -# ) +catkin_install_python(PROGRAMS + scripts/parse_gpsr_xmls.py + nodes/commands/question_answer + nodes/command_parser + DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} +) ## Mark executables for installation ## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_executables.html @@ -186,8 +183,7 @@ include_directories( ## Mark other files for installation (e.g. launch and bag files, etc.) 
# install(FILES -# # myfile1 -# # myfile2 +# requirements.txt # DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION} # ) @@ -196,7 +192,7 @@ include_directories( ############# ## Add gtest based cpp test target and link libraries -# catkin_add_gtest(${PROJECT_NAME}-test test/test_create_dataset.cpp) +# catkin_add_gtest(${PROJECT_NAME}-test test/test_coffee_shop.cpp) # if(TARGET ${PROJECT_NAME}-test) # target_link_libraries(${PROJECT_NAME}-test ${PROJECT_NAME}) # endif() diff --git a/legacy/listen_module/src/listen_module/__init__.py b/tasks/gpsr/data/.gitkeep similarity index 100% rename from legacy/listen_module/src/listen_module/__init__.py rename to tasks/gpsr/data/.gitkeep diff --git a/tasks/gpsr/data/mock_data/locations.json b/tasks/gpsr/data/mock_data/locations.json new file mode 100644 index 000000000..4ec74e87c --- /dev/null +++ b/tasks/gpsr/data/mock_data/locations.json @@ -0,0 +1,102 @@ +{ + "bed": { + "placeable": true, + "object_category": null + }, + "bedside table": { + "placeable": true, + "object_category": null + }, + "shelf": { + "placeable": true, + "object_category": "cleaning supplies" + }, + "trashbin": { + "placeable": false, + "object_category": null + }, + "dishwasher": { + "placeable": true, + "object_category": null + }, + "potted plant": { + "placeable": false, + "object_category": null + }, + "kitchen table": { + "placeable": true, + "object_category": "dishes" + }, + "chairs": { + "placeable": false, + "object_category": null + }, + "pantry": { + "placeable": true, + "object_category": "food" + }, + "refigerator": { + "placeable": true, + "object_category": null + }, + "sink": { + "placeable": true, + "object_category": null + }, + "cabinet": { + "placeable": true, + "object_category": "drinks" + }, + "coatrack": { + "placeable": false, + "object_category": null + }, + "desk": { + "placeable": true, + "object_category": "fruits" + }, + "armchair": { + "placeable": false, + "object_category": null + }, + "desk lamp": { + "placeable": false, + "object_category": null + }, + "waste basket": { + "placeable": false, + "object_category": null + }, + "tv stand": { + "placeable": true, + "object_category": null + }, + "storage rack": { + "placeable": true, + "object_category": null + }, + "lamp": { + "placeable": false, + "object_category": null + }, + "side tables": { + "placeable": true, + "object_category": "snacks" + }, + "sofa": { + "placeable": true, + "object_category": null + }, + "bookshelf": { + "placeable": true, + "object_category": "toys" + }, + "entrance": { + "placeable": false, + "object_category": null + }, + "exit": { + "placeable": false, + "object_category": null + } +} \ No newline at end of file diff --git a/tasks/gpsr/data/mock_data/names.json b/tasks/gpsr/data/mock_data/names.json new file mode 100644 index 000000000..7dbbe563f --- /dev/null +++ b/tasks/gpsr/data/mock_data/names.json @@ -0,0 +1,14 @@ +{ + "names": [ + "adel", + "angel", + "axel", + "charlie", + "janes", + "jules", + "morgan", + "paris", + "robin", + "simone" + ] +} \ No newline at end of file diff --git a/tasks/gpsr/data/mock_data/objects.json b/tasks/gpsr/data/mock_data/objects.json new file mode 100644 index 000000000..fec3bc340 --- /dev/null +++ b/tasks/gpsr/data/mock_data/objects.json @@ -0,0 +1,76 @@ +{ + "drinks": { + "items": [ + "orange juice", + "red wine", + "milk", + "iced tea", + "cola", + "tropical juice", + "juice pack" + ], + "singular": "drink" + }, + "fruits": { + "items": [ + "apple", + "pear", + "lemon", + "peach", + "banana", + "strawberry", + "orange", + "plum" 
+ ], + "singular": "fruit" + }, + "snacks": { + "items": [ + "cheezit", + "cornflakes", + "pringles" + ], + "singular": "snack" + }, + "foods": { + "items": [ + "tuna", + "sugar", + "strawberry jello", + "tomato soup", + "mustard", + "chocolate jello", + "spam", + "coffee grounds" + ], + "singular": "food" + }, + "dishes": { + "items": [ + "plate", + "fork", + "spoon", + "cup", + "knife", + "bowl" + ], + "singular": "dish" + }, + "toys": { + "items": [ + "rubiks cube", + "soccer ball", + "dice", + "tennis ball", + "baseball" + ], + "singular": "toy" + }, + "cleaning supplies": { + "items": [ + "cleanser", + "sponge" + ], + "singular": "cleaning supply" + } +} \ No newline at end of file diff --git a/tasks/gpsr/data/mock_data/rooms.json b/tasks/gpsr/data/mock_data/rooms.json new file mode 100644 index 000000000..c880dac20 --- /dev/null +++ b/tasks/gpsr/data/mock_data/rooms.json @@ -0,0 +1,9 @@ +{ + "rooms": [ + "bedroom", + "kitchen", + "office", + "living room", + "bedroom" + ] +} \ No newline at end of file diff --git a/tasks/gpsr/launch/commands/question_answer.launch b/tasks/gpsr/launch/commands/question_answer.launch new file mode 100644 index 000000000..87de56930 --- /dev/null +++ b/tasks/gpsr/launch/commands/question_answer.launch @@ -0,0 +1,22 @@ + + + + + + + + + + + \ No newline at end of file diff --git a/tasks/gpsr/nodes/command_parser b/tasks/gpsr/nodes/command_parser new file mode 100644 index 000000000..ca350bfd2 --- /dev/null +++ b/tasks/gpsr/nodes/command_parser @@ -0,0 +1,225 @@ +#!/usr/bin/env python3 +import argparse +import smach +import rospy +from typing import Dict + +from gpsr.load_known_data import GPSRDataLoader +from gpsr.regex_command_parser import Configuration, gpsr_compile_and_parse +from lasr_skills import AskAndListen, Say + + +def parse_args() -> Dict: + parser = argparse.ArgumentParser(description="GPSR Command Parser") + parser.add_argument( + "--data-dir", + type=str, + default="../data/mock_data/", + help="Path to the directory that contains the data json files.", + ) + known, unknown = parser.parse_known_args() + return vars(known) + + +class ParseCommand(smach.State): + def __init__(self, data_config: Configuration): + smach.State.__init__( + self, + outcomes=["succeeded", "failed"], + input_keys=["transcribed_speech"], + output_keys=["command"], + ) + self.data_config = data_config + + def execute(self, userdata): + rospy.loginfo(f"Received command : {userdata.transcribed_speech.lower()}") + try: + userdata.command = gpsr_compile_and_parse( + self.data_config, userdata.transcribed_speech.lower() + ) + except Exception as e: + rospy.logerr(e) + return "failed" + return "succeeded" + + +class OutputParsedCommand(smach.State): + def __init__(self): + smach.State.__init__( + self, + outcomes=["succeeded", "failed"], + input_keys=["command"], + output_keys=["command_string"], + ) + + def _get_english_translation(self, command_dict: Dict) -> str: + translation_str = "" + + for index, command in enumerate(command_dict["commands"]): + command_paramaters = command_dict["command_params"][index] + print(f"Command: {command}, parameters: {command_paramaters}") + if index == 0: + translation_str += "First, you want me to " + else: + translation_str += "Then, you want me to " + guide = False + if command == "take": + if "object" in command_paramaters: + translation_str += f"take the {command_paramaters['object']} " + if "location" in command_paramaters: + translation_str += f"from the {command_paramaters['location']} " + elif "person" in command_paramaters: + 
translation_str += f"from {command_paramaters['person']} " + elif "room" in command_paramaters: + translation_str += f"from the {command_paramaters['room']} " + else: # take corresponds to guiding + guide = True + elif command == "place": + translation_str += f"place the {command_paramaters['object']} on the {command_paramaters['location']} " + elif command == "deliver": + translation_str += f"deliver the {command_paramaters['object']} " + if "name" in command_paramaters: + translation_str += f"to {command_paramaters['name']} " + translation_str += f"in the {command_paramaters['location']} " + elif "gesture" in command_paramaters: + translation_str += ( + f"to the person who is {command_paramaters['gesture']} " + ) + translation_str += f"in the {command_paramaters['location']} " + else: + translation_str += f"to you." + elif command == "go": + translation_str += f"go to the " + if "location" in command_paramaters: + translation_str += f"{command_paramaters['location']} " + elif "room" in command_paramaters: + translation_str += f"{command_paramaters['room']} " + elif command == "find": + if "gesture" in command_paramaters: + translation_str += ( + f"find the person who is {command_paramaters['gesture']} " + ) + translation_str += f"in the {command_paramaters['location']} " + elif "object" in command_paramaters: + translation_str += f"find the {command_paramaters['object']} " + translation_str += f"in the {command_paramaters['location']} " + elif command == "talk": + pass + elif command == "answer": + pass + elif command == "meet": + translation_str += f"meet {command_paramaters['name']} " + if "location" in command_paramaters: + translation_str += f"in the {command_paramaters['location']} " + elif command == "tell": + pass + elif command == "answer": + pass + elif command == "meet": + pass + elif command == "tell": + pass + elif command == "greet": + pass + elif command == "remember": + pass + elif command == "count": + pass + elif command == "describe": + pass + elif command == "offer": + pass + elif command == "follow": + pass + elif command == "accompany": + pass + if command == "guide" or guide: + pass + + return translation_str + + def execute(self, userdata): + try: + command: Dict = userdata.command + tts_phrase = self._get_english_translation(command) + except Exception as e: + rospy.logerr(e) + return "failed" + rospy.loginfo(tts_phrase) + userdata.command_string = tts_phrase + return "succeeded" + + +class CommandParserStateMachine(smach.StateMachine): + def __init__(self, config: Configuration): + smach.StateMachine.__init__( + self, + outcomes=["succeeded", "failed"], + input_keys=["tts_phrase", "command_string"], + output_keys=["command"], + ) + self.config = config + with self: + smach.StateMachine.add( + "GET_COMMAND", + AskAndListen(), + transitions={"succeeded": "PARSE_COMMAND", "failed": "GET_COMMAND"}, + remapping={ + "tts_phrase": "tts_phrase", + "transcribed_speech": "transcribed_speech", + }, + ) + smach.StateMachine.add( + "PARSE_COMMAND", + ParseCommand(data_config=self.config), + transitions={ + "succeeded": "OUTPUT_PARSED_COMMAND", + "failed": "GET_COMMAND", + }, + remapping={ + "transcribed_speech": "transcribed_speech", + "command": "command", + }, + ) + smach.StateMachine.add( + "OUTPUT_PARSED_COMMAND", + OutputParsedCommand(), + transitions={ + "succeeded": "SAY_PARSED_COMMAND", + "failed": "GET_COMMAND", + }, + remapping={"command": "command", "tts_phrase": "tts_phrase"}, + ) + smach.StateMachine.add( + "SAY_PARSED_COMMAND", + Say(), + transitions={ + 
"succeeded": "GET_COMMAND", + "aborted": "GET_COMMAND", + "preempted": "GET_COMMAND", + }, + remapping={"text": "command_string"}, + ) + + +if __name__ == "__main__": + rospy.init_node("gpsr_command_parser") + args = parse_args() + data_loader = GPSRDataLoader(data_dir=args["data_dir"]) + gpsr_known_data: Dict = data_loader.load_data() + config = Configuration( + { + "person_names": gpsr_known_data["names"], + "location_names": gpsr_known_data["non_placeable_locations"], + "placement_location_names": gpsr_known_data["placeable_locations"], + "room_names": gpsr_known_data["rooms"], + "object_names": gpsr_known_data["objects"], + "object_categories_plural": gpsr_known_data["categories_plural"], + "object_categories_singular": gpsr_known_data["categories_singular"], + } + ) + rospy.loginfo("GPSR Command Parser: Initialized") + sm = CommandParserStateMachine(config) + sm.userdata.tts_phrase = "I am ready to receive a command; ask away!" + result = sm.execute() + rospy.spin() diff --git a/tasks/gpsr/nodes/commands/question_answer b/tasks/gpsr/nodes/commands/question_answer new file mode 100644 index 000000000..9c2301c6c --- /dev/null +++ b/tasks/gpsr/nodes/commands/question_answer @@ -0,0 +1,96 @@ +#!/usr/bin/env python3 +import rospy +import argparse +import smach +from lasr_skills.xml_question_answer import XmlQuestionAnswer +from lasr_skills.ask_and_listen import AskAndListen +from lasr_skills.say import Say + + +class QuestionAnswerStateMachine(smach.StateMachine): + def __init__(self, input_data: dict): + smach.StateMachine.__init__( + self, + outcomes=["succeeded", "failed"], + output_keys=["closest_answers"], + ) + self.userdata.tts_phrase = "I hear you have a question for me; ask away!" + with self: + smach.StateMachine.add( + "GET_QUESTION", + AskAndListen(), + transitions={"succeeded": "XML_QUESTION_ANSWER", "failed": "failed"}, + remapping={ + "tts_phrase:": "tts_phrase", + "transcribed_speech": "transcribed_speech", + }, + ) + smach.StateMachine.add( + "XML_QUESTION_ANSWER", + XmlQuestionAnswer( + input_data["index_path"], + input_data["txt_path"], + input_data["xml_path"], + input_data["k"], + ), + transitions={"succeeded": "SAY_ANSWER", "failed": "failed"}, + remapping={ + "query_sentence": "transcribed_speech", + "closest_answers": "closest_answers", + }, + ) + smach.StateMachine.add( + "SAY_ANSWER", + Say(format_str="The answer to your question is: {}"), + transitions={ + "succeeded": "succeeded", + "aborted": "failed", + "preempted": "failed", + }, + remapping={"placeholders": "closest_answers"}, + ) + + +def parse_args() -> dict: + parser = argparse.ArgumentParser(description="GPSR Question Answer") + parser.add_argument( + "--k", + type=int, + help="The number of closest answers to return", + required=True, + ) + parser.add_argument( + "--index_path", + type=str, + help="The path to the index file that is populated with the sentences embeddings of the questions", + required=True, + ) + parser.add_argument( + "--txt_path", + type=str, + help="The path to the txt file containing a list of questions.", + required=True, + ) + parser.add_argument( + "--xml_path", + type=str, + help="The path to the xml file containing question/answer pairs", + required=True, + ) + args, _ = parser.parse_known_args() + args.k = int(args.k) + return vars(args) + + +if __name__ == "__main__": + rospy.init_node("gpsr_question_answer") + args: dict = parse_args() + while not rospy.is_shutdown(): + q_a_sm = QuestionAnswerStateMachine(args) + outcome = q_a_sm.execute() + if outcome == "succeeded": + 
rospy.loginfo("Question Answer State Machine succeeded") + else: + rospy.logerr("Question Answer State Machine failed") + + rospy.spin() diff --git a/legacy/lasr_navigate_to_known_person/package.xml b/tasks/gpsr/package.xml similarity index 75% rename from legacy/lasr_navigate_to_known_person/package.xml rename to tasks/gpsr/package.xml index 3cd73d964..e94bc707c 100644 --- a/legacy/lasr_navigate_to_known_person/package.xml +++ b/tasks/gpsr/package.xml @@ -1,25 +1,28 @@ - lasr_navigate_to_known_person + gpsr 0.0.0 - The lasr_navigate_to_known_person package + The gpsr task package - elisabeth + Matt Barker + Siyao Li + Jared Swift + Nicole Lehchevska - TODO + MIT - + @@ -49,14 +52,13 @@ catkin - rospy - rospy - rospy - - + lasr_vector_databases_faiss + lasr_vector_databases_msgs + lasr_speech_recognition_whisper + - + diff --git a/tasks/gpsr/scripts/parse_gpsr_xmls.py b/tasks/gpsr/scripts/parse_gpsr_xmls.py new file mode 100644 index 000000000..e85fdf706 --- /dev/null +++ b/tasks/gpsr/scripts/parse_gpsr_xmls.py @@ -0,0 +1,27 @@ +import xml.etree.ElementTree as ET + + +def parse_question_xml(xml_file_path: str) -> dict: + """Parses the GPSR Q/A xml file and returns a dictionary + consisting of two lists, one for questions and one for answers, + where the index of each question corresponds to the index of its + corresponding answer. + + Args: + xml_file_path (str): full path to xml file to parse + + Returns: + dict: dictionary with keys "questions" and "answers" + each of which is a list of strings. + """ + tree = ET.parse(xml_file_path) + root = tree.getroot() + parsed_questions = [] + parsed_answers = [] + for q_a in root: + question = q_a.find("q").text + answer = q_a.find("a").text + parsed_questions.append(question) + parsed_answers.append(answer) + + return {"questions": parsed_questions, "answers": parsed_answers} diff --git a/tasks/gpsr/setup.py b/tasks/gpsr/setup.py new file mode 100644 index 000000000..386c709e5 --- /dev/null +++ b/tasks/gpsr/setup.py @@ -0,0 +1,8 @@ +#!/usr/bin/env python3 + +from distutils.core import setup +from catkin_pkg.python_setup import generate_distutils_setup + +setup_args = generate_distutils_setup(packages=["gpsr"], package_dir={"": "src"}) + +setup(**setup_args) diff --git a/tasks/gpsr/src/gpsr/load_known_data.py b/tasks/gpsr/src/gpsr/load_known_data.py new file mode 100644 index 000000000..b2ac81184 --- /dev/null +++ b/tasks/gpsr/src/gpsr/load_known_data.py @@ -0,0 +1,163 @@ +#!/usr/bin/env python + +""" +GPSR regex parser (and command generator) requires a list of the following names: + + - objects + - object categories plural + - object categories singular + - placeable locations + - non-placeable locations + - people + - rooms +""" + +import json +import os +from typing import List, Tuple, Dict + + +class GPSRDataLoader: + """Loads the a priori known data for the GPSR task from the + corresponding json files.""" + + def __init__(self, data_dir: str = "../data/mock_data/") -> None: + """Stores the data directory that contains the json files to load. + + Args: + data_dir (str, optional): Path to the directory that contains the + data json files. Defaults to "../data/mock_data/". + """ + self._data_dir: str = data_dir + + # assumed constant file names + self.LOCATIONS_FILE: str = "locations.json" + self.OBJECTS_FILE: str = "objects.json" + self.NAMES_FILE: str = "names.json" + self.ROOMS_FILE: str = "rooms.json" + + if not self._validate_dir(): + raise ValueError( + "The data directory does not contain all of the necessary json files." 
+ ) + + def _validate_dir(self) -> bool: + """Checks if all of the necessary json files are present in the + data directory.""" + + if not all( + [ + self.LOCATIONS_FILE in os.listdir(self._data_dir), + self.OBJECTS_FILE in os.listdir(self._data_dir), + self.NAMES_FILE in os.listdir(self._data_dir), + self.ROOMS_FILE in os.listdir(self._data_dir), + ] + ): + return False + return True + + def _load_names(self) -> List[str]: + """Loads the names stored in the names json into a list of strings. + + Returns: + List[str]: list of names found in the names.json file + """ + + with open(os.path.join(self._data_dir, self.NAMES_FILE), "r") as f: + names = json.load(f) + return names[ + "names" + ] # names is a dictionary with a single key "names" that contains a list of names + + def _load_rooms(self) -> List[str]: + """Loads the rooms stored in the rooms json into a list of strings. + + Returns: + List[str]: list of rooms found in the rooms.json file + """ + + with open(os.path.join(self._data_dir, self.ROOMS_FILE), "r") as f: + rooms = json.load(f) + return rooms[ + "rooms" + ] # rooms is a dictionary with a single key "rooms" that contains a list of rooms + + def _load_locations(self) -> Tuple[List[str], List[str]]: + """Loads the locations into two lists of strings of location names: + one list for placeable locations and the other for non-placeable. + + Returns: + Tuple[List[str], List[str]]: placeable, non-placeable locations. + """ + + placeable_locations: List[str] = [] + non_placeable_locations: List[str] = [] + + with open(os.path.join(self._data_dir, self.LOCATIONS_FILE), "r") as f: + locations = json.load(f) + + for location, attributes in locations.items(): + if attributes["placeable"]: + placeable_locations.append(location) + non_placeable_locations.append(location) + + return placeable_locations, non_placeable_locations + + def _load_objects(self) -> Tuple[List[str], List[str], List[str]]: + """Loads the objects into three lists of strings: + one list for object names, one for the plural names of object categories, + and one for the singular names of object categories. + + Returns: + Tuple[List[str], List[str], List[str]]: objects, categories plural, categories singular + """ + + objects: List[str] = [] + categories_plural: List[str] = [] + categories_singular: List[str] = [] + + with open(os.path.join(self._data_dir, self.OBJECTS_FILE), "r") as f: + object_data = json.load(f) + + for category, category_data in object_data.items(): + categories_plural.append(category) + categories_singular.append(category_data["singular"]) + objects.extend(category_data["items"]) + + return objects, categories_plural, categories_singular + + def load_data(self) -> Dict[str, List[str]]: + """Loads all of the known data for the GPSR task. 
+ + Returns: + Dict[str, List[str]]: dictionary containing the following + keys and their corresponding lists of strings: + - "names" + - "rooms" + - "placeable_locations" + - "non_placeable_locations" + - "objects" + - "categories_plural" + - "categories_singular" + """ + + names = self._load_names() + rooms = self._load_rooms() + placeable_locations, non_placeable_locations = self._load_locations() + objects, categories_plural, categories_singular = self._load_objects() + + return { + "names": names, + "rooms": rooms, + "placeable_locations": placeable_locations, + "non_placeable_locations": non_placeable_locations, + "objects": objects, + "categories_plural": categories_plural, + "categories_singular": categories_singular, + } + + +if __name__ == "__main__": + loader = GPSRDataLoader() + data = loader.load_data() + print(data) diff --git a/tasks/gpsr/src/gpsr/regex_command_parser.py b/tasks/gpsr/src/gpsr/regex_command_parser.py new file mode 100644 index 000000000..ea091f052 --- /dev/null +++ b/tasks/gpsr/src/gpsr/regex_command_parser.py @@ -0,0 +1,556 @@ +import itertools +import re + +from typing import List, Union, TypedDict, Dict, Any + +counter = 0 +sub_command_counter = 0 +seen_sub_command_group_names = [] + + +def uniq(i: str) -> str: + global counter + counter += 1 + return f"uniq{counter}_{i}" + + +def list_to_regex(list: List[str], key: Union[str, None] = None): + if key is None: + return f"(?:{'|'.join(list)})" + + return f"(?P<{uniq(key)}>{'|'.join(list)})" + + +# data from gpsr_commands +verb_dict = { + "take": ["take", "get", "grasp", "fetch"], + "place": ["put", "place"], + "deliver": ["bring", "give", "deliver"], + "bring": ["bring", "give"], + "go": ["go", "navigate"], + "find": ["find", "locate", "look for"], + "talk": ["tell", "say"], + "answer": ["answer"], + "meet": ["meet"], + "tell": ["tell"], + "greet": ["greet", "salute", "say hello to", "introduce yourself to"], + # "remember": ["meet", "contact", "get to know", "get acquainted with"], <--- LOOKS UNUSED + "count": ["tell me how many"], + "describe": ["tell me how", "describe"], + "offer": ["offer"], + "follow": ["follow"], + "guide": ["guide", "escort", "take", "lead"], + "accompany": ["accompany"], +} + + +def verb(v): + # return list_to_regex(verb_dict[v], f"verb_{v}") + return list_to_regex(verb_dict[v], "verb") + # return list_to_regex(verb_dict[v], None if len(verb_dict[v]) == 1 else "verb") + + +prep_dict = { + "deliverPrep": ["to"], + "placePrep": ["on"], + "inLocPrep": ["in"], + "fromLocPrep": ["from"], + "toLocPrep": ["to"], + "atLocPrep": ["at"], + "talkPrep": ["to"], + "locPrep": ["in", "at"], + "onLocPrep": ["on"], + "arePrep": ["are"], + "ofPrsPrep": ["of"], +} + + +def prep(v: str): + return list_to_regex(prep_dict[v]) + # return list_to_regex(prep_dict[v], f"prep_{v}") + + +class Configuration(TypedDict): + person_names: List[str] + location_names: List[str] + placement_location_names: List[str] + room_names: List[str] + object_names: List[str] + object_categories_plural: List[str] + object_categories_singular: List[str] + + def key_to_list(self, key: str): + if key == "name": + return self["person_names"] + elif key == "loc": + return self["location_names"] + elif key == "loc2": + return self["location_names"] + # return ['loc2'] + elif key == "plcmtLoc": + return self["placement_location_names"] + elif key == "plcmtLoc2": + return self["placement_location_names"] + # return ['plcmtLoc2'] + elif key == "room": + return self["room_names"] + elif key == "room2": + return self["room_names"] + # 
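
The helpers at the top of regex_command_parser.py turn word lists into regex alternations, optionally wrapped in a named group; uniq() keeps generated group names unique across the whole pattern. For example (the exact uniqN_ prefixes depend on how many groups have been created so far):

    from gpsr.regex_command_parser import list_to_regex, verb

    print(list_to_regex(["in", "at"]))                # (?:in|at)
    print(list_to_regex(["cup", "plate"], "object"))  # e.g. (?P<uniq1_object>cup|plate)
    print(verb("take"))                               # e.g. (?P<uniq2_verb>take|get|grasp|fetch)
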
return ['room2'] + elif key == "obj": + return self["object_names"] + elif key == "singCat": + return self["object_categories_singular"] + elif key == "plurCat": + return self["object_categories_plural"] + else: + raise Exception("unreachable") + + def pick(self, key: str, lists: List[str]): + union = [] + for list in lists: + if list == "inRoom": + union.append( + f"(?:{prep('inLocPrep')} the (?P<{uniq('location')}>{'|'.join(Configuration.key_to_list(self, 'room'))}))" + ) + elif list == "atLoc": + union.append( + f"(?:{prep('atLocPrep')} the (?P<{uniq('location')}>{'|'.join(Configuration.key_to_list(self, 'loc'))}))" + ) + else: + union = union + Configuration.key_to_list(self, list) + return f"(?P<{uniq(key)}>{'|'.join(union)})" + + +def gpsr_components(): + connector_list = ["and"] + gesture_person_list = [ + "waving person", + "person raising their left arm", + "person raising their right arm", + "person pointing to the left", + "person pointing to the right", + ] + pose_person_list = ["sitting person", "standing person", "lying person"] + # Ugly... + gesture_person_plural_list = [ + "waving persons", + "persons raising their left arm", + "persons raising their right arm", + "persons pointing to the left", + "persons pointing to the right", + ] + pose_person_plural_list = ["sitting persons", "standing persons", "lying persons"] + + person_info_list = ["name", "pose", "gesture"] + object_comp_list = [ + "biggest", + "largest", + "smallest", + "heaviest", + "lightest", + "thinnest", + ] + + talk_list = [ + "something about yourself", + "the time", + "what day is today", + "what day is tomorrow", + "your teams name", + "your teams country", + "your teams affiliation", + "the day of the week", + "the day of the month", + ] + question_list = ["question", "quiz"] + + color_list = ["blue", "yellow", "black", "white", "red", "orange", "gray"] + clothe_list = ["t shirt", "shirt", "blouse", "sweater", "coat", "jacket"] + clothes_list = ["t shirts", "shirts", "blouses", "sweaters", "coats", "jackets"] + color_clothe_list = [] + for a, b in list(itertools.product(color_list, clothe_list)): + color_clothe_list = color_clothe_list + [a + " " + b] + color_clothes_list = [] + for a, b in list(itertools.product(color_list, clothes_list)): + color_clothes_list = color_clothes_list + [a + " " + b] + + # convert lists to regex components + connector_list = list_to_regex(connector_list) + gesture_person_list = list_to_regex(gesture_person_list, "gesture") + pose_person_list = list_to_regex(pose_person_list, "pose") + gesture_person_plural_list = list_to_regex(gesture_person_plural_list, "gesture") + pose_person_plural_list = list_to_regex(pose_person_plural_list, "pose") + person_info_list = list_to_regex(person_info_list, "personinfo") + object_comp_list = list_to_regex(object_comp_list, "objectcomp") + talk_list = list_to_regex(talk_list, "talk") + question_list = list_to_regex(question_list, "question") + color_clothe_list = list_to_regex(color_clothe_list, "clothes") + color_clothes_list = list_to_regex(color_clothes_list, "clothes") + + return ( + verb, + prep, + "(?:an|a)", + connector_list, + gesture_person_list, + pose_person_list, + gesture_person_plural_list, + pose_person_plural_list, + person_info_list, + object_comp_list, + talk_list, + question_list, + color_clothe_list, + color_clothes_list, + ) + + +def gpsr_regex(configuration: Configuration): + ( + verb, + prep, + art, + connector_list, + gesture_person_list, + pose_person_list, + gesture_person_plural_list, + pose_person_plural_list, + 
person_info_list, + object_comp_list, + talk_list, + question_list, + color_clothe_list, + color_clothes_list, + ) = gpsr_components() + + commands = [] + + def command(key: str, matcher: str): + matcher = re.sub("\(\?P\<", f"(?P{matcher})") + + def get_possible_sub_commands(type: str) -> str: + sub_commands = [] + assertion_check = False + if type == "atLoc": + sub_commands.append( + f"{verb('find')} {art} {Configuration.pick(configuration, 'object', ['obj', 'singCat'])} and {get_possible_sub_commands('foundObj')}" + ) + sub_commands.append( + f"{verb('find')} the (?:{gesture_person_list}|{pose_person_list}) and {get_possible_sub_commands('foundPers')}" + ) + sub_commands.append( + f"{verb('meet')} {Configuration.pick(configuration, 'name', ['name'])} and {get_possible_sub_commands('foundPers')}" + ) + elif type == "hasObj": + sub_commands.append( + f"{verb('place')} it {prep('onLocPrep')} the {Configuration.pick(configuration, 'location', ['plcmtLoc'])}" + ) + sub_commands.append(f"{verb('deliver')} it to me") + sub_commands.append( + f"{verb('deliver')} it {prep('deliverPrep')} the (?:{gesture_person_list}|{pose_person_list}) {prep('inLocPrep')} the {Configuration.pick(configuration, 'location', ['room'])}" + ) + sub_commands.append( + f"{verb('deliver')} it {prep('deliverPrep')} {Configuration.pick(configuration, 'name', ['name'])} {prep('inLocPrep')} the {Configuration.pick(configuration, 'location', ['room'])}" + ) + elif type == "foundPers": + sub_commands.append(f"{verb('talk')} {talk_list}") + sub_commands.append(f"{verb('answer')} a {question_list}") + sub_commands.append(f"{verb('follow')} them") + sub_commands.append( + f"{verb('follow')} them {prep('toLocPrep')} the {Configuration.pick(configuration, 'location', ['room'])}" + ) + sub_commands.append( + f"{verb('guide')} them {prep('toLocPrep')} the {Configuration.pick(configuration, 'location', ['room'])}" + ) + elif type == "foundObj": + sub_commands.append( + f"{verb('take')} it and {get_possible_sub_commands('hasObj')}" + ) + assertion_check = True + + union = "|".join(sub_commands) + group_names = re.findall(r"\(\?P<([a-zA-Z0-9_]+)>", union) + global seen_sub_command_group_names + global sub_command_counter + for index, name in enumerate(group_names): + if name in seen_sub_command_group_names: + # make name unique + new_name = f"{name}_{sub_command_counter}" + seen_sub_command_group_names.append(new_name) + sub_command_counter += 1 + union = re.sub(rf"\(\?P<{name}>", f"(?P<{new_name}>", union) + else: + seen_sub_command_group_names.append(name) + + # groups = re.search(r"(\b[A-Z]+\b).+(\b\d+)", union) + return f"(?:{union})" + # return f"(?P<{uniq(key)}>{'|'.join(union)})" + + # print(get_possible_sub_commands("atLoc")) + command( + "goToLoc", + f"{verb('go')} {prep('toLocPrep')} the {Configuration.pick(configuration, 'location', ['loc', 'room'])} then {get_possible_sub_commands('atLoc')}", + ) + command( + "takeObjFromPlcmt", + f"{verb('take')} {art} {Configuration.pick(configuration, 'object', ['obj', 'singCat'])} {prep('fromLocPrep')} the {Configuration.pick(configuration, 'location', ['plcmtLoc'])} and {get_possible_sub_commands('hasObj')}", + ) + command( + "findPrsInRoom", + f"{verb('find')} a (?:{gesture_person_list}|{pose_person_list}) {prep('inLocPrep')} the {Configuration.pick(configuration, 'location', ['room'])} and {get_possible_sub_commands('foundPers')}", + ) + command( + "findObjInRoom", + f"{verb('find')} {art} {Configuration.pick(configuration, 'object', ['obj', 'singCat'])} {prep('inLocPrep')} the 
{Configuration.pick(configuration, 'location', ['room'])} then {get_possible_sub_commands('foundObj')}", + ) + command( + "meetPrsAtBeac", + f"{verb('meet')} {Configuration.pick(configuration, 'name', ['name'])} {prep('inLocPrep')} the {Configuration.pick(configuration, 'location', ['room'])} and {get_possible_sub_commands('foundPers')}", + ) + command( + "countObjOnPlcmt", + f"{verb('count')} {Configuration.pick(configuration, 'object', ['plurCat'])} there are {prep('onLocPrep')} the {Configuration.pick(configuration, 'location', ['plcmtLoc'])}", + ) + command( + "countPrsInRoom", + f"{verb('count')} (?:{gesture_person_plural_list}|{pose_person_plural_list}) are {prep('inLocPrep')} the {Configuration.pick(configuration, 'location', ['room'])}", + ) + command( + "tellPrsInfoInLoc", + f"{verb('tell')} me the {person_info_list} of the person {Configuration.pick(configuration, 'location', ['inRoom', 'atLoc'])}", + ) + command( + "tellObjPropOnPlcmt", + f"{verb('tell')} me what is the {object_comp_list} object {prep('onLocPrep')} the {Configuration.pick(configuration, 'location', ['plcmtLoc'])}", + ) + command( + "talkInfoToGestPrsInRoom", + f"{verb('talk')} {talk_list} {prep('talkPrep')} the {gesture_person_list} {prep('inLocPrep')} the {Configuration.pick(configuration, 'location', ['room'])}", + ) + command( + "answerToGestPrsInRoom", + f"{verb('answer')} the {question_list} {prep('ofPrsPrep')} the {gesture_person_list} {prep('inLocPrep')} the {Configuration.pick(configuration, 'location', ['room'])}", + ) + command( + "followNameFromBeacToRoom", + f"{verb('follow')} {Configuration.pick(configuration, 'name', ['name'])} {prep('fromLocPrep')} the {Configuration.pick(configuration, 'start', ['loc'])} {prep('toLocPrep')} the {Configuration.pick(configuration, 'end', ['room'])}", + ) + command( + "guideNameFromBeacToBeac", + f"{verb('guide')} {Configuration.pick(configuration, 'name', ['name'])} {prep('fromLocPrep')} the {Configuration.pick(configuration, 'start', ['loc'])} {prep('toLocPrep')} the {Configuration.pick(configuration, 'end', ['loc', 'room'])}", + ) + command( + "guidePrsFromBeacToBeac", + f"{verb('guide')} the (?:{gesture_person_list}|{pose_person_list}) {prep('fromLocPrep')} the {Configuration.pick(configuration, 'start', ['loc'])} {prep('toLocPrep')} the {Configuration.pick(configuration, 'end', ['loc', 'room'])}", + ) + command( + "guideClothPrsFromBeacToBeac", + f"{verb('guide')} the person wearing a {color_clothe_list} {prep('fromLocPrep')} the {Configuration.pick(configuration, 'start', ['loc'])} {prep('toLocPrep')} the {Configuration.pick(configuration, 'end', ['loc', 'room'])}", + ) + command( + "bringMeObjFromPlcmt", + f"{verb('bring')} me {art} {Configuration.pick(configuration, 'object', ['obj'])} {prep('fromLocPrep')} the {Configuration.pick(configuration, 'location', ['plcmtLoc'])}", + ) + command( + "tellCatPropOnPlcmt", + f"{verb('tell')} me what is the {object_comp_list} {Configuration.pick(configuration, 'object', ['singCat'])} {prep('onLocPrep')} the {Configuration.pick(configuration, 'location', ['plcmtLoc'])}", + ) + command( + "greetClothDscInRm", + f"{verb('greet')} the person wearing {art} {color_clothe_list} {prep('inLocPrep')} the {Configuration.pick(configuration, 'location', ['room'])} and {get_possible_sub_commands('foundPers')}", + ) + command( + "greetNameInRm", + f"{verb('greet')} {Configuration.pick(configuration, 'name', ['name'])} {prep('inLocPrep')} the {Configuration.pick(configuration, 'location', ['room'])} and 
{get_possible_sub_commands('foundPers')}", + ) + command( + "meetNameAtLocThenFindInRm", + f"{verb('meet')} {Configuration.pick(configuration, 'name', ['name'])} {prep('atLocPrep')} the {Configuration.pick(configuration, 'location', ['room'])} then {verb('find')} them {prep('inLocPrep')} the {Configuration.pick(configuration, 'destination', ['room'])}", + ) + command( + "countClothPrsInRoom", + f"{verb('count')} people {prep('inLocPrep')} the {Configuration.pick(configuration, 'location', ['room'])} are wearing {color_clothes_list}", + ) + command( + "tellPrsInfoAtLocToPrsAtLoc", + f"{verb('tell')} the {person_info_list} of the person {prep('atLocPrep')} the {Configuration.pick(configuration, 'location', ['room'])} to the person {prep('atLocPrep')} the {Configuration.pick(configuration, 'destination', ['room'])}", + ) + command( + "followPrsAtLoc", + f"{verb('follow')} the (?:{gesture_person_list}|{pose_person_list}) {Configuration.pick(configuration, 'location', ['inRoom', 'atLoc'])}", + ) + return "|".join(commands) + + +def gpsr_parse(matches: Dict[str, str]) -> Dict[str, Any]: + result: dict[str, Any] = { + "commands": [], + "command_params": [], + } + for key in matches.keys(): + value = matches[key] + if value is None: + continue + key_to_check = key.split("_")[-1] + while key_to_check.isnumeric(): + key = "_".join(key.split("_")[:-1]) + key_to_check = key.split("_")[-1] + if key_to_check == "verb": + result["commands"].append(reverse_translate_verb_dict(value)) + result["command_params"].append({}) + elif key_to_check in [ + "object", + "location", + "gesture", + "room", + "name", + "start", + "end", + "objectcomp", + "clothes", + "talk", + ]: + value_to_add = value + try: + result["command_params"][-1][key_to_check] = value_to_add + except: + continue + else: + print(f"Unhandled key: {key_to_check}") + return result + + +def gpsr_compile_and_parse(config: Configuration, input: str) -> Dict: + input = input.lower() + # remove punctuation + input = re.sub(r"[^\w\s]", "", input) + if input[0] == " ": + input = input[1:] + print(f"Parsed input: {input}") + regex_str = gpsr_regex(config) + regex = re.compile(regex_str) + matches = regex.match(input) + matches = matches.groupdict() + object_categories = ( + config["object_categories_singular"] + config["object_categories_plural"] + ) + return parse_result_dict(gpsr_parse(matches), object_categories) + + +def parse_result_dict(result: Dict, object_categories: List[str]) -> Dict: + """Parses the result dictionary output by the gpsr parse to + handle missing parameters. 
+ + Args: + result (dict): _description_ + + Returns: + dict: _description_ + """ + + for i, command in enumerate(result["commands"]): + if "object" in result["command_params"][i]: + if result["command_params"][i]["object"] in object_categories: + # rename object to object category + result["command_params"][i]["object_category"] = result[ + "command_params" + ][i]["object"] + del result["command_params"][i]["object"] + # Update command params based on the previous commands params + if i > 0: + if "location" not in result["command_params"][i]: + if "location" in result["command_params"][i - 1]: + result["command_params"][i]["location"] = result["command_params"][ + i - 1 + ]["location"] + if "room" not in result["command_params"][i]: + if "room" in result["command_params"][i - 1]: + result["command_params"][i - 1]["room"] = result["command_params"][ + i - 1 + ]["room"] + del result["command_params"][i]["room"] + if "name" not in result["command_params"][i]: + if "name" in result["command_params"][i - 1]: + result["command_params"][i]["name"] = result["command_params"][ + i - 1 + ]["name"] + if "object" not in result["command_params"][i]: + if "object" in result["command_params"][i - 1]: + result["command_params"][i]["object"] = result["command_params"][ + i - 1 + ]["object"] + + return result + + +def reverse_translate_verb_dict(verb: str) -> str: + for master_verb, verbs in verb_dict.items(): + if verb in verbs: + return master_verb + return verb + + +if __name__ == "__main__": + object_categories_plural = ["sticks"] + object_categories_singular = ["stick"] + object_categories = object_categories_singular + object_categories_plural + config: Configuration = { + "person_names": ["guest1", "guest2"], + "location_names": ["sofa", "piano", "kitchen table"], + "placement_location_names": ["kitchen table"], + "room_names": ["living room", "kitchen"], + "object_names": ["cup", "television"], + "object_categories_plural": ["sticks"], + "object_categories_singular": ["stick"], + } + + regex_str = gpsr_regex(config) + + regex = re.compile(regex_str) + + def execute(input: str, object_categories: List[str]): + matches = regex.match(input).groupdict() + return parse_result_dict(gpsr_parse(matches), object_categories) + + print( + execute( + "go to the kitchen then meet guest1 and tell the time", + object_categories, + ) + ) + + # print( + # execute( + # "navigate to the kitchen then find a cup and get it and bring it to the person pointing to the right in the kitchen", + # object_categories, + # ) + # ) + + # print( + # execute( + # "navigate to the kitchen then find a cup and get it and bring it to me", + # object_categories, + # ) + # ) + # print( + # execute( + # "navigate to the kitchen table then find a stick and fetch it and deliver it to guest1 in the living room", + # object_categories, + # ) + # ) + + # print( + # execute( + # "lead the person wearing a red shirt from the sofa to the living room", + # object_categories, + # ) + # ) + + # print( + # execute( + # "tell me what is the biggest stick on the kitchen table", + # object_categories, + # ) + # ) diff --git a/legacy/markers/src/markers/__init__.py b/tasks/lift/CATKIN_IGNORE similarity index 100% rename from legacy/markers/src/markers/__init__.py rename to tasks/lift/CATKIN_IGNORE diff --git a/tasks/receptionist/CMakeLists.txt b/tasks/receptionist/CMakeLists.txt index 512f6a5e6..85d832a1b 100644 --- a/tasks/receptionist/CMakeLists.txt +++ b/tasks/receptionist/CMakeLists.txt @@ -161,10 +161,10 @@ include_directories( ## Mark executable 
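
For reference, a successful parse of the sample sentence exercised in the module's __main__ block comes back in the two-list shape consumed by the command_parser node. Roughly, and only as a sketch:

    # Assumes the command() helper in gpsr_regex appends each compiled pattern to
    # `commands` (for example commands.append(f"(?:{matcher})")); as shown in the
    # hunk above it calls re.sub with only two arguments and never appends, so the
    # appending step is an assumption here. The repeated "location"/"name" keys on
    # later commands come from the propagation in parse_result_dict.
    expected = {
        "commands": ["go", "meet", "talk"],
        "command_params": [
            {"location": "kitchen"},
            {"name": "guest1", "location": "kitchen"},
            {"talk": "the time", "location": "kitchen", "name": "guest1"},
        ],
    }
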
scripts (Python etc.) for installation ## in contrast to setup.py, you can choose the destination -# catkin_install_python(PROGRAMS -# scripts/my_python_script -# DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} -# ) +catkin_install_python(PROGRAMS + scripts/main.py + DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} +) ## Mark executables for installation ## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_executables.html diff --git a/tasks/receptionist/README.md b/tasks/receptionist/README.md deleted file mode 100644 index 54796a8cd..000000000 --- a/tasks/receptionist/README.md +++ /dev/null @@ -1,88 +0,0 @@ -# lift - -The lift package - -This package is maintained by: -- [zoe](mailto:zoe.a.evans@kcl.ac.uk) - -## Prerequisites - -This package depends on the following ROS packages: -- catkin (buildtool) -- geometry_msgs (build) -- message_generation (build) -- roscpp (build) -- rospy (build) -- std_msgs (build) -- geometry_msgs (exec) -- roscpp (exec) -- rospy (exec) -- std_msgs (exec) - -Ask the package maintainer to write or create a blank `doc/PREREQUISITES.md` for their package! - -## Usage - -Ask the package maintainer to write a `doc/USAGE.md` for their package! - -## Example - -Ask the package maintainer to write a `doc/EXAMPLE.md` for their package! - -## Technical Overview - -Ask the package maintainer to write a `doc/TECHNICAL.md` for their package! - -## ROS Definitions - -### Launch Files - -#### `no_rasa` - -No description provided. - -| Argument | Default | Description | -|:-:|:-:|---| -| is_sim | false | | -| plot_show | false | | -| plot_save | true | | -| debug_with_images | true | | -| publish_markers | true | | -| debug | 3 | | -| rasa | true | | - - -#### `demo` - -No description provided. - -#### `setup` - -No description provided. - -| Argument | Default | Description | -|:-:|:-:|---| -| is_sim | false | | -| plot_show | false | | -| plot_save | true | | -| debug_with_images | true | | -| publish_markers | true | | -| debug | 3 | | -| rasa | true | | -| whisper_matcher | by-index | | -| whisper_device_param | 13 | | -| rasa_model | $(find lasr_rasa)/assistants/lift/models | | - - - -### Messages - -This package has no messages. - -### Services - -This package has no services. - -### Actions - -This package has no actions. 
diff --git a/tasks/receptionist/__init__.py b/tasks/receptionist/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/tasks/receptionist/config/lab.yaml b/tasks/receptionist/config/lab.yaml new file mode 100644 index 000000000..a3adc5c93 --- /dev/null +++ b/tasks/receptionist/config/lab.yaml @@ -0,0 +1,42 @@ +priors: + names: + - Adel + - Angel + - Axel + - Charlie + - Jane + - Jules + - Morgan + - Paris + - Robin + - Simone + drinks: + - cola + - iced tea + - juice pack + - milk + - orange juice + - red wine + - tropical juice +wait_pose: + position: + x: 2.4307581363168773 + y: -1.661594410669659 + z: 0.0 + orientation: + x: 0.0 + y: 0.0 + z: 0.012769969339563213 + w: 0.9999184606171978 +wait_area: [[2.65, -0.61], [4.21, -0.33], [4.58, -2.27], [2.67, -2.66]] +seat_pose: + position: + x: 1.1034954065916212 + y: 0.17802904565746552 + z: 0.0 + orientation: + x: 0.0 + y: 0.0 + z: 0.816644293927375 + w: 0.577141314753899 +seat_area: [[-0.39, 0.87], [-0.74, 2.18], [1.26, 2.64], [1.54, 1.26]] diff --git a/tasks/receptionist/config/motions.yaml b/tasks/receptionist/config/motions.yaml index de45b5d18..06a719ab6 100644 --- a/tasks/receptionist/config/motions.yaml +++ b/tasks/receptionist/config/motions.yaml @@ -5,7 +5,7 @@ play_motion: points: - positions: [0.5, 0.0] time_from_start: 0.0 - look_center: + look_centre: joints: [head_1_joint, head_2_joint] points: - positions: [0.0, 0.0] @@ -20,7 +20,7 @@ play_motion: points: - positions: [0.5, -0.25] time_from_start: 0.0 - look_down_center: + look_down_centre: joints: [head_1_joint, head_2_joint] points: - positions: [0.0, -0.25] diff --git a/tasks/receptionist/config/receptionist_demo.yaml b/tasks/receptionist/config/receptionist_demo.yaml deleted file mode 100644 index 55c48730c..000000000 --- a/tasks/receptionist/config/receptionist_demo.yaml +++ /dev/null @@ -1,42 +0,0 @@ -start: - pose: - position: {x: 0.869912857238, y: 1.07249069916, z: 0.0} - orientation: {x: 0.0, y: 0.0, z: -0.629267081124, w: 0.777189127957} - - -host: - name: "zoe" - -guest1: - name: "unknown" - drink: "unknown" - -guest2: - name: "unknown" - drink: "unknown" - - -guestcount: - count: 0 - -wait_area: - polygon: [[1.94, 0.15], [2.98, 0.28], [3.08, -0.68], [2.06, -0.84]] - - -wait_position: - pose: - position: {x: 0.576111426292, y: -0.688977508097, z: 0.0} - orientation: {x: 0.0, y: 0.0, z: 0.16777977598, w: 0.985824501} - - -#Hardcoded, TODO remove: -talk_position: - pose: - position: {x: 1.21588332458, y: -0.507325055265, z: 0.0} - orientation: {x: 0.0, y: 0.0, z: 0.121772525848, w: 0.992558034549} - - -seating_area_position: - pose: - position: {x: 2.12474401462, y: -2.2094874081, z: 0.0} - orientation: {x: 0.0, y: 0.0, z: -0.527834895731, w: 0.849346997904} diff --git a/tasks/receptionist/launch/demo.launch b/tasks/receptionist/launch/demo.launch deleted file mode 100644 index 450e8fa6c..000000000 --- a/tasks/receptionist/launch/demo.launch +++ /dev/null @@ -1,5 +0,0 @@ - - - - - diff --git a/tasks/receptionist/launch/setup.launch b/tasks/receptionist/launch/setup.launch index 71bce994b..55b7c235e 100644 --- a/tasks/receptionist/launch/setup.launch +++ b/tasks/receptionist/launch/setup.launch @@ -1,70 +1,25 @@ - - - - - - - - - - + + - - - - - - + + + + + - - - - - - - - - - - - - - - - - - - - - - - - - - + + - - - - - - - + + - - - - - - - diff --git a/tasks/receptionist/scripts/main.py b/tasks/receptionist/scripts/main.py new file mode 100755 index 000000000..808952737 --- /dev/null +++ b/tasks/receptionist/scripts/main.py @@ -0,0 +1,46 @@ 
+#!/usr/bin/env python3 +import rospy +from receptionist.state_machine import Receptionist + +from geometry_msgs.msg import Pose, Point, Quaternion +from shapely.geometry import Polygon + +if __name__ == "__main__": + rospy.init_node("receptionist_robocup") + wait_pose_param = rospy.get_param("/receptionist/wait_pose") + wait_pose = Pose( + position=Point(**wait_pose_param["position"]), + orientation=Quaternion(**wait_pose_param["orientation"]), + ) + + wait_area_param = rospy.get_param("/receptionist/wait_area") + wait_area = Polygon(wait_area_param) + + seat_pose_param = rospy.get_param("/receptionist/seat_pose") + seat_pose = Pose( + position=Point(**seat_pose_param["position"]), + orientation=Quaternion(**seat_pose_param["orientation"]), + ) + + seat_area_param = rospy.get_param("/receptionist/seat_area") + seat_area = Polygon(seat_area_param) + + receptionist = Receptionist( + wait_pose, + wait_area, + seat_pose, + seat_area, + { + "name": "John", + "drink": "beer", + "attributes": { + "hair_colour": "strawberry blonde", + "glasses": False, + "hat": True, + "height": "tall", + }, + }, + ) + outcome = receptionist.execute() + rospy.loginfo(f"Receptionist finished with outcome: {outcome}") + rospy.spin() diff --git a/tasks/receptionist/src/receptionist/__init__.py b/tasks/receptionist/src/receptionist/__init__.py index 396df9212..e69de29bb 100644 --- a/tasks/receptionist/src/receptionist/__init__.py +++ b/tasks/receptionist/src/receptionist/__init__.py @@ -1 +0,0 @@ -from .default import Default \ No newline at end of file diff --git a/tasks/receptionist/src/receptionist/default.py b/tasks/receptionist/src/receptionist/default.py deleted file mode 100644 index 0e70c83fc..000000000 --- a/tasks/receptionist/src/receptionist/default.py +++ /dev/null @@ -1,57 +0,0 @@ -#!/usr/bin/env python3 -from tiago_controllers.controllers import Controllers -from lasr_voice.voice import Voice -from lasr_vision_msgs.srv import YoloDetection -import rospy, actionlib -from tiago_controllers.controllers.base_controller import CmdVelController -from interaction_module.srv import AudioAndTextInteraction -from play_motion_msgs.msg import PlayMotionGoal, PlayMotionAction -from lasr_speech.srv import Speech -from tf_module.srv import BaseTransformRequest, ApplyTransformRequest, LatestTransformRequest, BaseTransform, \ - LatestTransform, ApplyTransform, TfTransform, TfTransformRequest - -from cv_bridge3 import CvBridge -from lasr_shapely import LasrShapely -from std_msgs.msg import Int16, Empty - - -rasa = True - -class Default: - def __init__(self): - rospy.loginfo("YOLO is here") - self.yolo = rospy.ServiceProxy('/yolov8/detect', YoloDetection) - rospy.loginfo("PM is here") - self.pm = actionlib.SimpleActionClient('/play_motion', PlayMotionAction) - rospy.loginfo("Controllers is here") - self.voice = Voice() - self.bridge = CvBridge() - rospy.loginfo("CV Bridge") - self.shapely = LasrShapely() - rospy.loginfo("Got shapely") - self.controllers = Controllers() - rospy.loginfo("CMD is here") - self.cmd = CmdVelController() - rospy.loginfo("Voice is here") - - self.tf = rospy.ServiceProxy('tf_transform', TfTransform) - print("TF is here") - self.tf_base = rospy.ServiceProxy('base_transform', BaseTransform) - self.tf_latest = rospy.ServiceProxy('latest_transform', LatestTransform) - self.tf_apply = rospy.ServiceProxy('apply_transform', ApplyTransform) - if rasa: - rospy.wait_for_service("/lasr_speech/transcribe_and_parse") - rospy.loginfo("SPEECH RASA is here") - self.speech = 
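
scripts/main.py relies on the nested structure of config/lab.yaml: the pose entries unpack straight into geometry_msgs types and the area entries are lists of [x, y] vertices for shapely. A standalone illustration of that mapping, with values abbreviated from lab.yaml (on the robot they come from rospy.get_param under /receptionist):

    from geometry_msgs.msg import Point, Pose, Quaternion
    from shapely.geometry import Polygon

    wait_pose_param = {
        "position": {"x": 2.43, "y": -1.66, "z": 0.0},
        "orientation": {"x": 0.0, "y": 0.0, "z": 0.0128, "w": 0.9999},
    }
    wait_area_param = [[2.65, -0.61], [4.21, -0.33], [4.58, -2.27], [2.67, -2.66]]

    wait_pose = Pose(
        position=Point(**wait_pose_param["position"]),
        orientation=Quaternion(**wait_pose_param["orientation"]),
    )
    wait_area = Polygon(wait_area_param)
    print(wait_pose.position.x, wait_area.area)
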
rospy.ServiceProxy("/lasr_speech/transcribe_and_parse", Speech) - else: - pass - rospy.loginfo("SPEECH Dialogflow is here") - self.speech = rospy.ServiceProxy("/interaction_module", AudioAndTextInteraction) - - if not rospy.get_published_topics(namespace='/pal_head_manager'): - rospy.loginfo("Is SIM ---> True") - rospy.set_param("/is_simulation", True) - else: - rospy.loginfo("Is SIM ---> FALSE") - rospy.set_param("/is_simulation", False) - diff --git a/tasks/receptionist/src/receptionist/defaults.py b/tasks/receptionist/src/receptionist/defaults.py deleted file mode 100755 index 0b0a3579f..000000000 --- a/tasks/receptionist/src/receptionist/defaults.py +++ /dev/null @@ -1,35 +0,0 @@ -#!/usr/bin/env python3 -import os, rospkg, shutil, rospy, sys - -rospy.loginfo("setting debug") -DEBUG = rospy.get_param('debug') # 3 -rospy.loginfo("setting Debug with images ") -DEBUG_WITH_IMAGES = rospy.get_param('debug_with_images') - - -# for matplotlib -rospy.loginfo("setting Plot SHOW ") -PLOT_SHOW = rospy.get_param('plot_show') -rospy.loginfo("setting Plot Save ") -PLOT_SAVE = rospy.get_param('plot_save') -rospy.loginfo("setting Publish Markers ") -PUBLISH_MARKERS = rospy.get_param('publish_markers') -rospy.loginfo("setting Debug Path ") -DEBUG_PATH = os.getcwd() -rospy.loginfo("setting Rasa ") -RASA = rospy.get_param('rasa') - -rospy.logwarn("DEBUG: {DEBUG}, DEBUG_WITH_IMAGES: {DEBUG_WITH_IMAGES}, PLOT_SHOW: {PLOT_SHOW}, PLOT_SAVE: {PLOT_SAVE}".format(**locals())) - -if DEBUG_WITH_IMAGES: - if not os.path.exists(os.path.join(rospkg.RosPack().get_path("receptionist"), "debug_receptionist")): - os.mkdir(os.path.join(rospkg.RosPack().get_path("receptionist"), "debug_receptionist")) - DEBUG_PATH = os.path.join(rospkg.RosPack().get_path("receptionist"), "debug_receptionist",) - if os.path.exists(DEBUG_PATH): - shutil.rmtree(DEBUG_PATH) - os.mkdir(DEBUG_PATH) - - -# for plots in waypoitns -import random -TEST = random.randint(0, 1000) \ No newline at end of file diff --git a/tasks/receptionist/src/receptionist/main.py b/tasks/receptionist/src/receptionist/main.py deleted file mode 100755 index 4d7e81fbc..000000000 --- a/tasks/receptionist/src/receptionist/main.py +++ /dev/null @@ -1,12 +0,0 @@ -#!/usr/bin/env python3 -import rospy -from receptionist.sm import Receptionist - -if __name__ == "__main__": - rospy.init_node("receptionist_robocup") - receptionist = Receptionist() - outcome = receptionist.execute() - print(outcome) - rospy.spin() - - \ No newline at end of file diff --git a/tasks/receptionist/src/receptionist/sm.py b/tasks/receptionist/src/receptionist/sm.py deleted file mode 100755 index 91556c037..000000000 --- a/tasks/receptionist/src/receptionist/sm.py +++ /dev/null @@ -1,37 +0,0 @@ -#!/usr/bin/env python3 -import smach -from receptionist.states import Start -from receptionist.states import AskForName -from receptionist.states import AskForDrink -from receptionist.states import End -from receptionist import Default -from receptionist.states import GoToPerson -from receptionist.states import GoToSeatingArea -from receptionist.states import LookForSeats -from receptionist.states import GoToWaitForPerson -from receptionist.states import SpeakDescriptions -from lasr_skills import WaitForPersonInArea -from lasr_skills import DescribePeople -#from receptionist.states.end import End - - -class Receptionist(smach.StateMachine): - def __init__(self): - smach.StateMachine.__init__(self, outcomes=['succeeded', 'failed']) - self.default = Default() - self.userdata.area_polygon = [[1.94, 0.15], [2.98, 
0.28], [3.08, -0.68], [2.06, -0.84]] - self.userdata.depth_topic = "/xtion/depth_registered/points" - with self: - smach.StateMachine.add('START', Start(self.default), transitions={'succeeded' : 'GO_TO_WAIT_FOR_PERSON'}) - smach.StateMachine.add("GO_TO_WAIT_FOR_PERSON",GoToWaitForPerson(self.default), transitions={'succeeded': 'WAIT_FOR_PERSON'}) - smach.StateMachine.add('WAIT_FOR_PERSON', WaitForPersonInArea() ,transitions={'succeeded' : 'GO_TO_PERSON', 'failed' : 'failed'}) - smach.StateMachine.add('GO_TO_PERSON', GoToPerson(self.default),transitions={'succeeded':'ASK_FOR_NAME'}) - smach.StateMachine.add('ASK_FOR_NAME', AskForName(self.default),transitions={'failed':'ASK_FOR_NAME','succeeded':'ASK_FOR_DRINK'}) - smach.StateMachine.add('ASK_FOR_DRINK', AskForDrink(self.default),transitions={'failed':'ASK_FOR_DRINK','succeeded':'DESCRIBE_PEOPLE'}) - smach.StateMachine.add('DESCRIBE_PEOPLE', DescribePeople(),transitions={'succeeded':'SPEAK_DESCRIPTIONS','failed':'GO_TO_SEATING_AREA'}) - smach.StateMachine.add('SPEAK_DESCRIPTIONS', SpeakDescriptions(self.default), transitions={'failed':'GO_TO_SEATING_AREA','succeeded':'GO_TO_SEATING_AREA'}) - smach.StateMachine.add('GO_TO_SEATING_AREA', GoToSeatingArea(self.default), transitions={'succeeded' : 'LOOK_FOR_SEATS'}) - smach.StateMachine.add('LOOK_FOR_SEATS', LookForSeats(self.default), transitions={'succeeded' : 'GO_TO_WAIT_FOR_PERSON'}) - smach.StateMachine.add('END', End(self.default),transitions={'succeeded':'succeeded'}) - # smach.StateMachine.add('DESCRIBE_PEOPLE', DescribePeople(),transitions={'succeeded':'SPEAK_DESCRIPTIONS','failed':'DESCRIBE_PEOPLE'}) - # smach.StateMachine.add('SPEAK_DESCRIPTIONS', SpeakDescriptions(self.default), transitions={'failed':'DESCRIBE_PEOPLE','succeeded':'DESCRIBE_PEOPLE'}) diff --git a/tasks/receptionist/src/receptionist/speech_helper.py b/tasks/receptionist/src/receptionist/speech_helper.py deleted file mode 100755 index 40878e807..000000000 --- a/tasks/receptionist/src/receptionist/speech_helper.py +++ /dev/null @@ -1,49 +0,0 @@ -import rospy -import json - - -def listen(default): - print("trying to listen!") - resp = default.speech(True) - print("Resp success: ", resp.success) - if not resp.success: - default.voice.speak("Sorry, I didn't get that") - return listen(default) - resp = json.loads(resp.json_response) - rospy.loginfo(resp) - return resp - -def affirm(default): - resp = listen(default) - if resp['intent']['name'] != 'affirm': - default.voice.speak("Sorry, I didn't get that, please say yes or no") - return affirm(default) - choices = resp["entities"].get("choice", None) - if choices is None: - default.voice.speak("Sorry, I didn't get that") - return affirm(default) - choice = choices[0]["value"].lower() - if choice not in ["yes", "no"]: - default.voice.speak("Sorry, I didn't get that") - return affirm(default) - return choice - -def get_drink(default): - resp = listen(default) - # if resp['intent']['name'] != 'fav_drink': - # return "unknown" - drink = resp["entities"].get("drink",[]) - if drink is None or not drink: - return "unknown" - drink = drink[0]["value"].lower() - return str(drink) - -def get_name(default): - resp = listen(default) - # if resp['intent']['name'] != 'name': - # return "unknown" - name = resp["entities"].get("name",[]) - if name is None or not name: - return "unknown" - name = name[0]["value"].lower() - return str(name) \ No newline at end of file diff --git a/tasks/receptionist/src/receptionist/state_machine.py b/tasks/receptionist/src/receptionist/state_machine.py new 
file mode 100755 index 000000000..05e038bfb --- /dev/null +++ b/tasks/receptionist/src/receptionist/state_machine.py @@ -0,0 +1,284 @@ +import smach + +from geometry_msgs.msg import Pose +from shapely.geometry import Polygon +from lasr_skills import GoToLocation, WaitForPersonInArea, Say, AskAndListen +from receptionist.states import ( + ParseNameAndDrink, + GetGuestAttributes, + Introduce, + SeatGuest, +) + + +class Receptionist(smach.StateMachine): + + def __init__( + self, + wait_pose: Pose, + wait_area: Polygon, + seat_pose: Pose, + seat_area: Polygon, + host_data: dict, + ): + + smach.StateMachine.__init__(self, outcomes=["succeeded", "failed"]) + + with self: + + self.userdata.guest_data = {"host": host_data, "guest1": {}, "guest2": {}} + + smach.StateMachine.add( + "GO_TO_WAIT_LOCATION_GUEST_1", + GoToLocation(wait_pose), + transitions={ + "succeeded": "SAY_WAITING_GUEST_1", + "failed": "failed", + }, + ) + + smach.StateMachine.add( + "SAY_WAITING_GUEST_1", + Say(text="I am waiting for a guest."), + transitions={ + "succeeded": "WAIT_FOR_PERSON_GUEST_1", + "aborted": "failed", + "preempted": "failed", + }, + ) + + smach.StateMachine.add( + "WAIT_FOR_PERSON_GUEST_1", + WaitForPersonInArea(wait_area), + transitions={ + "succeeded": "GET_NAME_AND_DRINK_GUEST_1", + "failed": "failed", + }, + ) + + smach.StateMachine.add( + "GET_NAME_AND_DRINK_GUEST_1", + AskAndListen("What is your name and favourite drink?"), + transitions={ + "succeeded": "PARSE_NAME_AND_DRINK_GUEST_1", + "failed": "failed", + }, + ) + + smach.StateMachine.add( + "PARSE_NAME_AND_DRINK_GUEST_1", + ParseNameAndDrink("guest1"), + transitions={ + "succeeded": "GET_GUEST_ATTRIBUTES_GUEST_1", + "failed": "failed", + }, + remapping={"guest_transcription": "transcribed_speech"}, + ) + + smach.StateMachine.add( + "GET_GUEST_ATTRIBUTES_GUEST_1", + GetGuestAttributes("guest1"), + transitions={ + "succeeded": "SAY_FOLLOW_GUEST_1", + "failed": "failed", + }, + ) + + smach.StateMachine.add( + "SAY_FOLLOW_GUEST_1", + Say(text="Please follow me, I will guide you to the other guests"), + transitions={ + "succeeded": "GO_TO_SEAT_LOCATION_GUEST_1", + "preempted": "failed", + "aborted": "failed", + }, + ) + + smach.StateMachine.add( + "GO_TO_SEAT_LOCATION_GUEST_1", + GoToLocation(seat_pose), + transitions={ + "succeeded": "SAY_WAIT_GUEST_1", + "failed": "failed", + }, + ) + + smach.StateMachine.add( + "SAY_WAIT_GUEST_1", + Say(text="Please wait here on my left"), + transitions={ + "succeeded": "INTRODUCE_GUEST_1_TO_HOST", + "preempted": "failed", + "aborted": "failed", + }, + ) + + smach.StateMachine.add( + "INTRODUCE_GUEST_1_TO_HOST", + Introduce(guest_to_introduce="guest1", guest_to_introduce_to="host"), + transitions={ + "succeeded": "INTRODUCE_HOST_TO_GUEST_1", + "failed": "failed", + }, + ) + + smach.StateMachine.add( + "INTRODUCE_HOST_TO_GUEST_1", + Introduce(guest_to_introduce="host", guest_to_introduce_to="guest1"), + transitions={ + "succeeded": "SEAT_GUEST_1", + "failed": "failed", + }, + ) + + smach.StateMachine.add( + "SEAT_GUEST_1", + SeatGuest(seat_area), + transitions={ + "succeeded": "GO_TO_WAIT_LOCATION_GUEST_2", + "failed": "failed", + }, + ) + + """ + Guest 2 + """ + + smach.StateMachine.add( + "GO_TO_WAIT_LOCATION_GUEST_2", + GoToLocation(wait_pose), + transitions={ + "succeeded": "SAY_WAITING_GUEST_2", + "failed": "failed", + }, + ) + + smach.StateMachine.add( + "SAY_WAITING_GUEST_2", + Say(text="I am waiting for a guest."), + transitions={ + "succeeded": "WAIT_FOR_PERSON_GUEST_2", + "aborted": "failed", + 
"preempted": "failed", + }, + ) + + smach.StateMachine.add( + "WAIT_FOR_PERSON_GUEST_2", + WaitForPersonInArea(wait_area), + transitions={ + "succeeded": "GET_NAME_AND_DRINK_GUEST_2", + "failed": "failed", + }, + ) + + smach.StateMachine.add( + "GET_NAME_AND_DRINK_GUEST_2", + AskAndListen("What is your name and favourite drink?"), + transitions={ + "succeeded": "PARSE_NAME_AND_DRINK_GUEST_2", + "failed": "failed", + }, + ) + + smach.StateMachine.add( + "PARSE_NAME_AND_DRINK_GUEST_2", + ParseNameAndDrink("guest2"), + transitions={ + "succeeded": "GET_GUEST_ATTRIBUTES_GUEST_2", + "failed": "failed", + }, + remapping={"guest_transcription": "transcribed_speech"}, + ) + + smach.StateMachine.add( + "GET_GUEST_ATTRIBUTES_GUEST_2", + GetGuestAttributes("guest2"), + transitions={ + "succeeded": "SAY_FOLLOW_GUEST_2", + "failed": "failed", + }, + ) + + smach.StateMachine.add( + "SAY_FOLLOW_GUEST_2", + Say(text="Please follow me, I will guide you to the other guests"), + transitions={ + "succeeded": "GO_TO_SEAT_LOCATION_GUEST_2", + "preempted": "failed", + "aborted": "failed", + }, + ) + + smach.StateMachine.add( + "GO_TO_SEAT_LOCATION_GUEST_2", + GoToLocation(seat_pose), + transitions={ + "succeeded": "SAY_WAIT_GUEST_2", + "failed": "failed", + }, + ) + + smach.StateMachine.add( + "SAY_WAIT_GUEST_2", + Say(text="Please wait here on my left"), + transitions={ + "succeeded": "INTRODUCE_GUEST_2_TO_EVERYONE", + "preempted": "failed", + "aborted": "failed", + }, + ) + + smach.StateMachine.add( + "INTRODUCE_GUEST_2_TO_EVERYONE", + Introduce(guest_to_introduce="guest2", everyone=True), + transitions={ + "succeeded": "INTRODUCE_HOST_TO_GUEST_2", + "failed": "failed", + }, + ) + + smach.StateMachine.add( + "INTRODUCE_HOST_TO_GUEST_2", + Introduce(guest_to_introduce="host", guest_to_introduce_to="guest2"), + transitions={ + "succeeded": "INTRODUCE_GUEST_1_TO_GUEST_2", + "failed": "failed", + }, + ) + + smach.StateMachine.add( + "INTRODUCE_GUEST_1_TO_GUEST_2", + Introduce(guest_to_introduce="guest1", guest_to_introduce_to="guest2"), + transitions={ + "succeeded": "SEAT_GUEST_2", + "failed": "failed", + }, + ) + + smach.StateMachine.add( + "SEAT_GUEST_2", + SeatGuest(seat_area), + transitions={"succeeded": "GO_TO_FINISH_LOCATION", "failed": "failed"}, + ) + + """ + Finish + """ + smach.StateMachine.add( + "GO_TO_FINISH_LOCATION", + GoToLocation(wait_pose), + transitions={ + "succeeded": "SAY_FINISHED", + "failed": "failed", + }, + ) + smach.StateMachine.add( + "SAY_FINISHED", + Say(text="I am done."), + transitions={ + "succeeded": "succeeded", + "aborted": "failed", + "preempted": "failed", + }, + ) diff --git a/tasks/receptionist/src/receptionist/states/__init__.py b/tasks/receptionist/src/receptionist/states/__init__.py index 0f0114b7c..3a25c381c 100644 --- a/tasks/receptionist/src/receptionist/states/__init__.py +++ b/tasks/receptionist/src/receptionist/states/__init__.py @@ -1,9 +1,4 @@ -from .ask_for_name import AskForName -from .ask_for_drink import AskForDrink -from .end import End -from .go_to_person import GoToPerson -from .go_to_seating_area import GoToSeatingArea -from .go_to_wait_for_person import GoToWaitForPerson -from .look_for_seats import LookForSeats -from .start import Start -from .speakdescriptions import SpeakDescriptions \ No newline at end of file +from .get_name_and_drink import ParseNameAndDrink +from .get_attributes import GetGuestAttributes +from .introduce import Introduce +from .seat_guest import SeatGuest diff --git a/tasks/receptionist/src/receptionist/states/ask_for_drink.py 
b/tasks/receptionist/src/receptionist/states/ask_for_drink.py deleted file mode 100644 index e35f3a38d..000000000 --- a/tasks/receptionist/src/receptionist/states/ask_for_drink.py +++ /dev/null @@ -1,35 +0,0 @@ -import smach -import rospy -from receptionist.speech_helper import get_drink - -class AskForDrink(smach.State): - def __init__(self, default): - smach.State.__init__(self, outcomes=['succeeded','failed']) - self.default = default - - - def execute(self, userdata): - - guestcount = rospy.get_param("guestcount/count", 0) - - self.default.voice.speak("What is your favourite drink?") - - - for _ in range(3): - try: - drink = get_drink(self.default) - except: - drink = "unknown " - if drink == "unknown": - self.default.voice.speak("Sorry, I didn't get that. Could you repeat, please?") - else: - break - - if drink == "unknown": - self.default.voice.speak("Sadly, I couldn't understand your favourite drink. You might go thirsty this evening.") - else: - self.default.voice.speak(f"{drink}, what a great drink!") - - rospy.set_param(f"guest{guestcount+1}/drink", drink) - rospy.set_param("guestcount/count", guestcount+1) - return 'succeeded' diff --git a/tasks/receptionist/src/receptionist/states/ask_for_name.py b/tasks/receptionist/src/receptionist/states/ask_for_name.py deleted file mode 100644 index 3c2f1dcf4..000000000 --- a/tasks/receptionist/src/receptionist/states/ask_for_name.py +++ /dev/null @@ -1,33 +0,0 @@ -import smach -import rospy -from receptionist.speech_helper import get_name - -class AskForName(smach.State): - def __init__(self, default): - smach.State.__init__(self, outcomes=['succeeded','failed']) - self.default = default - - - def execute(self, userdata): - - guestcount = rospy.get_param("guestcount/count", 0) - - self.default.voice.speak("What is your name?") - - for _ in range(3): - try: - name = get_name(self.default) - except: - name = "unknown" - if name == "unknown": - self.default.voice.speak("Sorry, I didn't get that. 
Could you repeat, please?") - else: - break - - if name == "unknown": - self.default.voice.speak("I couldn't understand your name, but it's great to meet you!") - else: - self.default.voice.speak(f"It's great to meet you {name}!") - - rospy.set_param(f"guest{guestcount+1}/name", name) - return 'succeeded' diff --git a/tasks/receptionist/src/receptionist/states/end.py b/tasks/receptionist/src/receptionist/states/end.py deleted file mode 100644 index 3d350a00d..000000000 --- a/tasks/receptionist/src/receptionist/states/end.py +++ /dev/null @@ -1,20 +0,0 @@ -import smach -import rospy -from tiago_controllers.helpers.pose_helpers import get_pose_from_param - - -class End(smach.State): - def __init__(self, default): - smach.State.__init__(self, outcomes=['succeeded']) - self.default = default - - def execute(self, userdata): - guest1name = rospy.get_param('guest1/name') - guest1drink = rospy.get_param('guest1/drink') - guest2drink = rospy.get_param('guest2/drink') - guest2name = rospy.get_param('guest2/name') - - self.default.voice.speak(f"{guest1name}'s favourite drink was {guest1drink}") - self.default.voice.speak(f"{guest2name}'s favourite drink was {guest2drink}") - - return 'succeeded' \ No newline at end of file diff --git a/tasks/receptionist/src/receptionist/states/get_attributes.py b/tasks/receptionist/src/receptionist/states/get_attributes.py new file mode 100644 index 000000000..22538a267 --- /dev/null +++ b/tasks/receptionist/src/receptionist/states/get_attributes.py @@ -0,0 +1,103 @@ +""" +State for calling the service to get a set of guest attributes. +Currently incomplete. +""" + +import rospy +import smach +from smach import UserData +from typing import List, Any, Dict, Union +from lasr_vision_clip.srv import VqaRequest, VqaResponse, Vqa + + +class GetGuestAttributes(smach.State): + def __init__( + self, + guest_id: str, + attribute_service: Union[str, None] = None, + outcomes: List[str] = ["succeeded", "failed"], + input_keys: List[str] = ["guest_id", "guest_data"], + output_keys: List[str] = ["guest_data"], + ): + """Calls and parses the service that gets a set of guest attributes. + + Args: + attribute_service (str): Name of the service to call that returns the guest's attributes. 
+ """ + + super().__init__( + outcomes=outcomes, + input_keys=input_keys, + output_keys=output_keys, + ) + self._service_proxy = rospy.ServiceProxy("/clip_vqa/query_service", Vqa) + self._guest_id: str = guest_id + self._attribute_service: Union[str, None] = attribute_service + + def _call_attribute_service(self): + # TODO + pass + + def _send_vqa_request(self, possible_answers: List[str]) -> str: + request = VqaRequest() + request.possible_answers = possible_answers + response = self._service_proxy(request) + return response.answer + + def execute(self, userdata: UserData) -> str: + if self._attribute_service: + attributes = self._call_attribute_service() + else: + glasses_answers = [ + "a person wearing glasses", + "a person not wearing glasses", + ] + hat_answers = ["a person wearing a hat", "a person not wearing a hat"] + height_answers = ["a tall person", "a short person"] + hair_colour_answers = [ + "a person with black hair", + "a person with blonde hair", + "a person with brown hair", + "a person with red hair", + "a person with grey hair", + "a person with white hair", + ] + + glasses_response = self._send_vqa_request(glasses_answers) + hat_response = self._send_vqa_request(hat_answers) + height_response = self._send_vqa_request(height_answers) + hair_colour_response = self._send_vqa_request(hair_colour_answers) + + if glasses_response == "a person wearing glasses": + glasses = True + else: + glasses = False + if hat_response == "a person wearing a hat": + hat = True + else: + hat = False + if height_response == "a tall person": + height = "tall" + else: + height = "short" + if hair_colour_response == "a person with black hair": + hair_colour = "black" + elif hair_colour_response == "a person with blonde hair": + hair_colour = "blonde" + elif hair_colour_response == "a person with brown hair": + hair_colour = "brown" + elif hair_colour_response == "a person with red hair": + hair_colour = "red" + elif hair_colour_response == "a person with grey hair": + hair_colour = "grey" + + attributes = { + "hair_colour": hair_colour, + "glasses": glasses, + "hat": hat, + "height": height, + } + + userdata.guest_data[self._guest_id]["attributes"] = attributes + + return "succeeded" diff --git a/tasks/receptionist/src/receptionist/states/get_name_and_drink.py b/tasks/receptionist/src/receptionist/states/get_name_and_drink.py new file mode 100644 index 000000000..e2da4fede --- /dev/null +++ b/tasks/receptionist/src/receptionist/states/get_name_and_drink.py @@ -0,0 +1,78 @@ +""" +State for parsing the transcription of the guests' name and favourite drink, and adding this +to the guest data userdata +""" + +import rospy +import smach +from smach import UserData +from typing import List, Dict, Any + + +class ParseNameAndDrink(smach.State): + def __init__( + self, + guest_id: str, + param_key: str = "receptionist/priors", + ): + """Parses the transcription of the guests' name and favourite drink. + + Args: + param_key (str, optional): Name of the parameter that contains the list of + possible . Defaults to "receptionist/priors". 
+ """ + smach.State.__init__( + self, + outcomes=["succeeded", "failed"], + input_keys=["guest_transcription", "guest_data"], + output_keys=["guest data", "guest_transcription"], + ) + self._guest_id = guest_id + prior_data: Dict[str, List[str]] = rospy.get_param(param_key) + self._possible_names = [name.lower() for name in prior_data["names"]] + self._possible_drinks = [drink.lower() for drink in prior_data["drinks"]] + + def execute(self, userdata: UserData) -> str: + """Parses the transcription of the guests' name and favourite drink. + + Args: + userdata (UserData): State machine userdata assumed to contain a key + called "guest transcription" with the transcription of the guest's name and + favourite drink. + + Returns: + str: state outcome. Updates the userdata with the parsed name and drink, under + the parameter "guest data". + """ + + outcome = "succeeded" + name_found = False + drink_found = False + print(userdata) + print(type(userdata.guest_transcription)) + transcription = userdata.guest_transcription.lower() + + transcription = userdata["guest_transcription"].lower() + + for name in self._possible_names: + if name in transcription: + userdata.guest_data[self._guest_id]["name"] = name + rospy.loginfo(f"Guest Name identified as: {name}") + name_found = True + break + + for drink in self._possible_drinks: + if drink in transcription: + userdata.guest_data[self._guest_id]["drink"] = drink + rospy.loginfo(f"Guest Drink identified as: {drink}") + drink_found = True + break + + if not name_found: + rospy.loginfo("Name not found in transcription") + outcome = "failed" + if not drink_found: + rospy.loginfo("Drink not found in transcription") + outcome = "failed" + + return outcome diff --git a/tasks/receptionist/src/receptionist/states/go_to_person.py b/tasks/receptionist/src/receptionist/states/go_to_person.py deleted file mode 100644 index cd00aff0b..000000000 --- a/tasks/receptionist/src/receptionist/states/go_to_person.py +++ /dev/null @@ -1,15 +0,0 @@ -import smach -import rospy -from tiago_controllers.helpers.pose_helpers import get_pose_from_param - - -class GoToPerson(smach.State): - def __init__(self, default): - smach.State.__init__(self, outcomes=['succeeded']) - self.default = default - - def execute(self, userdata): - self.default.voice.speak("Going to person") - res = self.default.controllers.base_controller.ensure_sync_to_pose(get_pose_from_param('/talk_position/pose')) - rospy.logerr(res) - return 'succeeded' diff --git a/tasks/receptionist/src/receptionist/states/go_to_seating_area.py b/tasks/receptionist/src/receptionist/states/go_to_seating_area.py deleted file mode 100644 index 8e4cd87c2..000000000 --- a/tasks/receptionist/src/receptionist/states/go_to_seating_area.py +++ /dev/null @@ -1,16 +0,0 @@ -import smach -import rospy -from tiago_controllers.helpers.pose_helpers import get_pose_from_param - - -class GoToSeatingArea(smach.State): - def __init__(self, default): - smach.State.__init__(self, outcomes=['succeeded']) - self.default = default - - def execute(self, userdata): - self.default.voice.speak("Going to seating area") - res = self.default.controllers.base_controller.ensure_sync_to_pose(get_pose_from_param('/seating_area_position/pose')) - rospy.logerr(res) - - return 'succeeded' diff --git a/tasks/receptionist/src/receptionist/states/go_to_wait_for_person.py b/tasks/receptionist/src/receptionist/states/go_to_wait_for_person.py deleted file mode 100644 index 951015884..000000000 --- a/tasks/receptionist/src/receptionist/states/go_to_wait_for_person.py +++ 
/dev/null @@ -1,16 +0,0 @@ -import smach -import rospy -from tiago_controllers.helpers.pose_helpers import get_pose_from_param - - -class GoToWaitForPerson(smach.State): - def __init__(self, default): - smach.State.__init__(self, outcomes=['succeeded']) - self.default = default - - def execute(self, userdata): - self.default.voice.speak("I will wait for a person") - res = self.default.controllers.base_controller.ensure_sync_to_pose(get_pose_from_param('/wait_position/pose')) - rospy.logerr(res) - - return 'succeeded' diff --git a/tasks/receptionist/src/receptionist/states/introduce.py b/tasks/receptionist/src/receptionist/states/introduce.py new file mode 100644 index 000000000..cedb54740 --- /dev/null +++ b/tasks/receptionist/src/receptionist/states/introduce.py @@ -0,0 +1,149 @@ +""" +State machine that introduces the greeted guest to all other guests/host present in the +seating area. + +""" + +import rospy +import smach +from smach import UserData +from lasr_skills import Say +from typing import Dict, Any, Optional + + +def stringify_guest_data(guest_data: Dict[str, Any], guest_id: str) -> str: + """Converts the guest data for a specified guest into a string that can be used + for the robot to introduce the guest to the other guests/host. + + Args: + guest_data (Dict[str, Any]): guest data dictionary. + guest_id (str): guest id key to use to get the guest data. + + Returns: + str: string representation of the guest data. + """ + + relevant_guest_data = guest_data[guest_id] + + guest_str = "" + + guest_str += f"{relevant_guest_data['name']}, their favourite drink is {relevant_guest_data['drink']}. " + guest_str += f"They have {relevant_guest_data['attributes']['hair_colour']} hair, their height is {relevant_guest_data['attributes']['height']}, " + guest_str += f"they are {'wearing glasses' if relevant_guest_data['attributes']['glasses'] else 'not wearing glasses'}, and they are " + guest_str += f"{'wearing a hat' if relevant_guest_data['attributes']['hat'] else 'not wearing a hat'}." + + return guest_str + + +class GetStrGuestData(smach.State): + def __init__(self, guest_id: str): + super().__init__( + outcomes=["succeeded"], + input_keys=["guest_data"], + output_keys=["guest_str"], + ) + self._guest_id = guest_id + + def execute(self, userdata: UserData) -> str: + guest_str = stringify_guest_data(userdata.guest_data, self._guest_id) + userdata.guest_str = guest_str + return "succeeded" + + +class GetGuestName(smach.State): + def __init__(self, guest_id: str): + super().__init__( + outcomes=["succeeded"], + input_keys=["guest_data"], + output_keys=["requested_name"], + ) + self._guest_id = guest_id + + def execute(self, userdata: UserData) -> str: + requested_name = userdata.guest_data[self._guest_id]["name"] + userdata.requested_name = requested_name + return "succeeded" + + +class GetIntroductionString(smach.State): + def __init__(self): + super().__init__( + outcomes=["succeeded"], + input_keys=["guest_str", "requested_name"], + output_keys=["introduction_str"], + ) + + def execute(self, userdata: UserData) -> str: + introduction_str = ( + f"Hello {userdata.requested_name}, this is {userdata.guest_str}." 
+ ) + userdata.introduction_str = introduction_str + return "succeeded" + + +class Introduce(smach.StateMachine): + def __init__( + self, + guest_to_introduce: str, + guest_to_introduce_to: Optional[str] = None, + everyone: Optional[bool] = False, + ): + super().__init__( + outcomes=["succeeded", "failed"], + input_keys=["guest_data"], + ) + assert not (guest_to_introduce_to is None and not everyone) + + with self: + if everyone: + smach.StateMachine.add( + "GetStrGuestData", + GetStrGuestData(guest_id=guest_to_introduce), + transitions={"succeeded": "SayIntroduce"}, + ) + smach.StateMachine.add( + "SayIntroduce", + Say( + format_str="Hello everyone, this is {}.", + ), + transitions={ + "succeeded": "succeeded", + "preempted": "failed", + "aborted": "failed", + }, + remapping={"placeholders": "guest_str"}, + ) + + else: + smach.StateMachine.add( + "GetStrGuestData", + GetStrGuestData(guest_id=guest_to_introduce), + transitions={"succeeded": "GetGuestName"}, + ) + + smach.StateMachine.add( + "GetGuestName", + GetGuestName(guest_id=guest_to_introduce_to), + transitions={"succeeded": "GetIntroductionString"}, + ) + + smach.StateMachine.add( + "GetIntroductionString", + GetIntroductionString(), + transitions={"succeeded": "SayIntroduce"}, + remapping={ + "guest_str": "guest_str", + "requested_name": "requested_name", + }, + ) + + smach.StateMachine.add( + "SayIntroduce", + Say(), + transitions={ + "succeeded": "succeeded", + "preempted": "failed", + "aborted": "failed", + }, + remapping={"text": "introduction_str"}, + ) diff --git a/tasks/receptionist/src/receptionist/states/look_for_seats.py b/tasks/receptionist/src/receptionist/states/look_for_seats.py deleted file mode 100755 index f3c96307b..000000000 --- a/tasks/receptionist/src/receptionist/states/look_for_seats.py +++ /dev/null @@ -1,142 +0,0 @@ -#!/usr/bin/env python3 - -import smach -import rospy -import numpy as np -import math -from play_motion_msgs.msg import PlayMotionGoal -from geometry_msgs.msg import Point -from shapely.geometry import Polygon -from lasr_skills import DetectObjects3D, LookToPoint -from copy import copy - -class LookForSeats(smach.StateMachine): - - - class Look(smach.State): - - def __init__(self, default): - smach.State.__init__(self, outcomes=['done', 'not_done'], input_keys=['look_motion']) - self.default = default - self.motions = ['look_down_left', 'look_down_center', 'look_down_right'] - self.remaining_motions = copy(self.motions) - - def execute(self, userdata): - if not self.remaining_motions: - self.remaining_motions = copy(self.motions) - return 'done' - pm_goal = PlayMotionGoal(motion_name=self.remaining_motions.pop(0), skip_planning=True) - self.default.pm.send_goal_and_wait(pm_goal) - rospy.sleep(1.0) - return 'not_done' - - class ProcessDetections(smach.State): - - def __init__(self): - smach.State.__init__(self, outcomes=['succeeded'], input_keys=['detections_3d', 'bulk_people_detections_3d', 'bulk_seat_detections_3d'], output_keys=['bulk_people_detections_3d', 'bulk_seat_detections3d']) - - def execute(self, userdata): - for det in userdata.detections_3d: - if det[0].name == "person": - userdata.bulk_people_detections_3d.append(det) - else: - userdata.bulk_seat_detections_3d.append(det) - return 'succeeded' - - class CheckSeat(smach.State): - def __init__(self, default): - smach.State.__init__(self, outcomes=['not_done', 'succeeded', 'failed'],input_keys=['detections_3d', 'bulk_people_detections_3d', 'bulk_seat_detections_3d'], output_keys=['free_seat', 'bulk_people_detections_3d', 
'bulk_seat_detections3d', 'point']) - self.default = default - - def is_person_sitting_in_chair(self, person_contours, chair_contours): - return Polygon(person_contours).intersects(Polygon(chair_contours)) - - def execute(self, userdata): - if not userdata.bulk_seat_detections_3d: - return 'failed' - det1, p1 = userdata.bulk_seat_detections_3d.pop(0) - if not userdata.bulk_people_detections_3d: - userdata.free_seat = [det1, p1] - userdata.point = Point(*p1) - print(f"Chair at {p1} is free!") - robot_x, robot_y, _ = self.default.controllers.base_controller.get_current_pose() - r = np.array([robot_x, robot_y]) - p = np.array([p1[0], p1[1]]) - theta = np.degrees(np.arccos(np.dot(r, p) / (np.linalg.norm(r) * np.linalg.norm(p)))) - print(theta) - if theta > 10.0: - self.default.voice.sync_tts("Please sit down on this chair to my right") - elif theta < -10.0: - self.default.voice.sync_tts("Please sit down on this chair to my left") - else: - self.default.voice.sync_tts("Please sit down on this chair") - self.remaining_motions = [] - return 'succeeded' - for (det2, _) in userdata.bulk_people_detections_3d: - if not self.is_person_sitting_in_chair(np.array(det2.xyseg).reshape(-1, 2), np.array(det1.xyseg).reshape(-1, 2)): - userdata.free_seat = [det1, p1] - userdata.point = Point(*p1) - print(f"Chair at {p1} is free!") - robot_x, robot_y, _ = self.default.controllers.base_controller.get_current_pose() - r = np.array([robot_x, robot_y]) - p = np.array([p1[0], p1[1]]) - theta = np.degrees(np.arccos(np.dot(r, p) / (np.linalg.norm(r) * np.linalg.norm(p)))) - print(theta) - if theta > 10.0: - self.default.voice.sync_tts("Please sit down on this chair to my right") - elif theta < -10.0: - self.default.voice.sync_tts("Please sit down on this chair to my left") - else: - self.default.voice.sync_tts("Please sit down on this chair") - self.remaining_motions = [] - return 'succeeded' - return 'not_done' - - class PointToChair(smach.State): - - def __init__(self, default): - smach.State.__init__(self, outcomes=['succeeded'], input_keys=['point']) - self.default = default - - def execute(self, userdata): - - pm_goal = PlayMotionGoal(motion_name="back_to_default_head", skip_planning=True) - self.default.pm.send_goal_and_wait(pm_goal) - - self.default.controllers.base_controller.sync_face_to(userdata.point.x, userdata.point.y) - - pm_goal = PlayMotionGoal(motion_name="raise_torso", skip_planning=True) - self.default.pm.send_goal_and_wait(pm_goal) - - pm_goal = PlayMotionGoal(motion_name="point", skip_planning=False) - self.default.pm.send_goal_and_wait(pm_goal) - - rospy.sleep(5.0) - - pm_goal = PlayMotionGoal(motion_name="home", skip_planning=False) - self.default.pm.send_goal_and_wait(pm_goal) - - return 'succeeded' - - - def __init__(self, default): - smach.StateMachine.__init__(self, outcomes=['succeeded', 'failed']) - self.default = default - self.userdata.filter = ["person", "chair"] - self.userdata.bulk_people_detections_3d = [] - self.userdata.bulk_seat_detections_3d = [] - self.userdata.depth_topic = "xtion/depth_registered/points" - - with self: - smach.StateMachine.add('LOOK', self.Look(self.default), transitions={'not_done' : 'DETECT_OBJECTS_3D', 'done' : 'CHECK_SEAT'}) - smach.StateMachine.add('DETECT_OBJECTS_3D', DetectObjects3D(), transitions={'succeeded' : 'PROCESS_DETECTIONS', 'failed' : 'failed'}) - smach.StateMachine.add('PROCESS_DETECTIONS', self.ProcessDetections(), transitions={'succeeded' : 'LOOK'}) - smach.StateMachine.add('CHECK_SEAT', self.CheckSeat(self.default), 
transitions={'succeeded' : 'FINALISE_SEAT', 'failed' : 'failed', 'not_done': 'CHECK_SEAT'}) - smach.StateMachine.add('FINALISE_SEAT', self.PointToChair(self.default), transitions={'succeeded' : 'succeeded'}) - -if __name__ == "__main__": - from receptionist import Default - rospy.init_node("test_look_for_seats") - default = Default() - sm = LookForSeats(default) - sm.execute() \ No newline at end of file diff --git a/tasks/receptionist/src/receptionist/states/seat_guest.py b/tasks/receptionist/src/receptionist/states/seat_guest.py new file mode 100755 index 000000000..4090ea318 --- /dev/null +++ b/tasks/receptionist/src/receptionist/states/seat_guest.py @@ -0,0 +1,148 @@ +import smach + +from typing import List +from shapely.geometry import Polygon + +import numpy as np + +from lasr_skills import PlayMotion, Detect3DInArea, LookToPoint, Say + + +class SeatGuest(smach.StateMachine): + + _motions: List[str] = ["look_down_left", "look_down_right", "look_down_centre"] + + class ProcessDetections(smach.State): + + def __init__(self): + smach.State.__init__( + self, + outcomes=["succeeded", "failed"], + input_keys=[ + "detections_3d", + ], + output_keys=["seat_position"], + ) + + def execute(self, userdata) -> str: + seat_detections = [ + det for det in userdata.detections_3d if det.name == "chair" + ] + person_detections = [ + det for det in userdata.detections_3d if det.name == "person" + ] + + person_polygons: List[Polygon] = [ + Polygon(np.array(person.xyseg).reshape(-1, 2)) + for person in person_detections + ] + + print( + f"There are {len(seat_detections)} seats and {len(person_detections)} people." + ) + + for seat in seat_detections: + + seat_polygon: Polygon = Polygon(np.array(seat.xyseg).reshape(-1, 2)) + seat_is_empty: bool = True + for person_polygon in person_polygons: + if person_polygon.intersects(seat_polygon): + seat_is_empty = False + print(person_polygon.intersection(seat_polygon)) + break + + if seat_is_empty: + userdata.seat_position = seat.point + print(seat.point) + return "succeeded" + + return "failed" + + def __init__( + self, + seat_area: Polygon, + ): + smach.StateMachine.__init__(self, outcomes=["succeeded", "failed"]) + with self: + + # TODO: stop doing this + self.userdata.people_detections = [] + self.userdata.seat_detections = [] + + motion_iterator = smach.Iterator( + outcomes=["succeeded", "failed"], + it=self._motions, + it_label="motion", + input_keys=["people_detections", "seat_detections"], + output_keys=["seat_position"], + exhausted_outcome="failed", + ) + + with motion_iterator: + + container_sm = smach.StateMachine( + outcomes=["succeeded", "failed", "continue"], + input_keys=["motion", "people_detections", "seat_detections"], + output_keys=["seat_position"], + ) + + with container_sm: + smach.StateMachine.add( + "LOOK", + PlayMotion(), + transitions={ + "succeeded": "DETECT", + "aborted": "failed", + "preempted": "failed", + }, + remapping={"motion_name": "motion"}, + ) + smach.StateMachine.add( + "DETECT", + Detect3DInArea(seat_area, filter=["chair", "person"]), + transitions={"succeeded": "CHECK", "failed": "failed"}, + ) + smach.StateMachine.add( + "CHECK", + self.ProcessDetections(), + transitions={"succeeded": "succeeded", "failed": "continue"}, + ) + + smach.Iterator.set_contained_state( + "CONTAINER_SM", container_sm, loop_outcomes=["continue"] + ) + + smach.StateMachine.add( + "MOTION_ITERATOR", + motion_iterator, + transitions={"succeeded": "LOOK_TO_POINT", "failed": "failed"}, + ) + smach.StateMachine.add( + "LOOK_TO_POINT", + LookToPoint(), 
+ transitions={ + "succeeded": "SAY_SIT", + "aborted": "failed", + "preempted": "failed", + }, + remapping={"point": "seat_position"}, + ) + smach.StateMachine.add( + "SAY_SIT", + Say("Please sit in the seat that I am looking at."), + transitions={ + "succeeded": "RESET_HEAD", + "aborted": "failed", + "preempted": "failed", + }, + ) # TODO: sleep after this. + + smach.StateMachine.add( + "RESET_HEAD", + PlayMotion("look_centre"), + transitions={ + "succeeded": "succeeded", + "aborted": "failed", + "preempted": "failed", + }, + ) diff --git a/tasks/receptionist/src/receptionist/states/start.py b/tasks/receptionist/src/receptionist/states/start.py deleted file mode 100644 index 3e5f0eed9..000000000 --- a/tasks/receptionist/src/receptionist/states/start.py +++ /dev/null @@ -1,20 +0,0 @@ -import smach -import rospy -from tiago_controllers.helpers.pose_helpers import get_pose_from_param - - -class Start(smach.State): - def __init__(self, default): - smach.State.__init__(self, outcomes=['succeeded']) - self.default = default - - def execute(self, userdata): - self.default.voice.speak("Start of task.") - rospy.set_param("guest2/drink","unknown") - rospy.set_param("guest1/drink","unknown") - rospy.set_param("guestcount/count",0) - - res = self.default.controllers.base_controller.ensure_sync_to_pose(get_pose_from_param('/start/pose')) - rospy.logerr(res) - - return 'succeeded' diff --git a/worksheets/package.xml b/worksheets/package.xml index fdd316601..b01d124f6 100644 --- a/worksheets/package.xml +++ b/worksheets/package.xml @@ -7,7 +7,7 @@ - jared + Jared Swift