From fa215616f263b8f9629b429580094d3c07d4df33 Mon Sep 17 00:00:00 2001
From: Jared Swift
Date: Thu, 18 Apr 2024 16:38:15 +0100
Subject: [PATCH] FindPerson & FindNamedPerson (#138)

* chore: build lasr_shapely.
* fix: typehints.
* fix: more typehints.
* WIP: FindPerson skill.
* chore: rollback python version.
* fix: FindPerson skill.
* fix: use nanmean.
* fix: tf.
* feat(still WIP): FindPerson skill.
* feat: navigation helpers package.
* feat: go to person.
* chore: bring back CATKIN_IGNORE.
* chore: regenerate requirements.txt.
* refactor: can import GoTo* skills again.
* WIP: additional FindPerson states.
* chore: catkin_python_setup.
* Still WIP.
* refactor/feat: slight cleanup + add hamiltonian path solver.
* feat: basic FindPerson.
* feat: option for fixed location.
* feat: face person.
* refactor: use GoToLocation.
* fix/feat: ensure pcl data is reshaped correctly and compute min/max in z axis.
* refactor/feat: tf whole pointcloud (it's more reliable) and output min/max points.
* fix: allow for overriding pcl height / width.
* fix: override width and height (tf flattens the pcl for some reason...)
* fix: remappings.
* feat: allow for fixed text.
* fix: import.
* WIP: FindNamedPerson.
* feat: FindPerson and FindNamedPerson.
* fix: setup input keys correctly.
* refactor: drop GoToPerson.
* fix: imports.
* refactor: some cleanup.
* refactor: more cleanup!
* fix: handling of waypoints, etc.
* refactor: remove testing code.
* feat: logwarn when no people are detected.
---
 .../helpers/cv2_pcl/src/cv2_pcl/__init__.py   |  41 +++-
 .../helpers/navigation_helpers/CMakeLists.txt | 202 ++++++++++++++++++
 common/helpers/navigation_helpers/package.xml |  59 +++++
 common/helpers/navigation_helpers/setup.py    |  10 +
 .../src/navigation_helpers/__init__.py        |  29 +++
 .../lasr_vision_msgs/msg/Detection3D.msg      |   1 +
 .../lasr_vision_yolov8/examples/relay_3d      |   2 +-
 .../lasr_vision_yolov8/requirements.txt       |  10 +-
 .../src/lasr_vision_yolov8/yolo.py            |  42 ++--
 skills/src/lasr_skills/__init__.py            |   7 +-
 skills/src/lasr_skills/find_named_person.py   | 177 +++++++++++++++
 skills/src/lasr_skills/find_person.py         | 149 +++++++++++++
 skills/src/lasr_skills/go_to_location.py      |   8 +-
 13 files changed, 701 insertions(+), 36 deletions(-)
 create mode 100644 common/helpers/navigation_helpers/CMakeLists.txt
 create mode 100644 common/helpers/navigation_helpers/package.xml
 create mode 100644 common/helpers/navigation_helpers/setup.py
 create mode 100644 common/helpers/navigation_helpers/src/navigation_helpers/__init__.py
 create mode 100755 skills/src/lasr_skills/find_named_person.py
 create mode 100755 skills/src/lasr_skills/find_person.py

diff --git a/common/helpers/cv2_pcl/src/cv2_pcl/__init__.py b/common/helpers/cv2_pcl/src/cv2_pcl/__init__.py
index 0c299f09a..05f3ef901 100644
--- a/common/helpers/cv2_pcl/src/cv2_pcl/__init__.py
+++ b/common/helpers/cv2_pcl/src/cv2_pcl/__init__.py
@@ -3,17 +3,24 @@
 import ros_numpy as rnp
 import cv2
 
+from typing import Tuple, Union
+
 Mat = np.ndarray
 
 
-def pcl_to_cv2(pcl: PointCloud2) -> Mat:
+def pcl_to_cv2(
+    pcl: PointCloud2, height: Union[int, None] = None, width: Union[int, None] = None
+) -> Mat:
     """
     Convert a given PointCloud2 message to a cv2 image
     """
+    height = height or pcl.height
+    width = width or pcl.width
+
     # Extract rgb image from pointcloud
     frame = np.fromstring(pcl.data, dtype=np.uint8)
-    frame = frame.reshape(pcl.height, pcl.width, 32)
+    frame = frame.reshape(height, width, -1)
     frame = frame[:, :, 16:19]
 
     # Ensure array is contiguous
@@ -22,16 +29,25 @@ def pcl_to_cv2(pcl: PointCloud2) -> Mat:
     return frame
 
 
-def seg_to_centroid(pcl: PointCloud2, xyseg: np.ndarray) -> np.ndarray:
+def seg_to_centroid(
+    pcl: PointCloud2,
+    xyseg: np.ndarray,
+    height: Union[int, None] = None,
+    width: Union[int, None] = None,
+) -> np.ndarray:
     """
     Computes the centroid of a given segment in a pointcloud
     """
+    height = height or pcl.height
+    width = width or pcl.width
+
     # Convert xyseg to contours
     contours = xyseg.reshape(-1, 2)
 
     # cv2 image
-    cv_im = pcl_to_cv2(pcl)
+    cv_im = pcl_to_cv2(pcl, height, width)
+
     # Compute mask from contours
     mask = np.zeros(shape=cv_im.shape[:2])
     cv2.fillPoly(mask, pts=[contours], color=(255, 255, 255))
@@ -44,6 +60,7 @@ def seg_to_centroid(pcl: PointCloud2, xyseg: np.ndarray) -> np.ndarray:
 
     # Unpack pointcloud into xyz array
     pcl_xyz = rnp.point_cloud2.pointcloud2_to_xyz_array(pcl, remove_nans=False)
+    pcl_xyz = pcl_xyz.reshape(height, width, 3)
 
     # Extract points of interest
     xyz_points = [pcl_xyz[x][y] for x, y in indices]
@@ -51,16 +68,27 @@ def seg_to_centroid(pcl: PointCloud2, xyseg: np.ndarray) -> np.ndarray:
     return np.nanmean(xyz_points, axis=0)
 
 
-def bb_to_centroid(pcl: PointCloud2, x: int, y: int, w: int, h: int) -> np.ndarray:
+def bb_to_centroid(
+    pcl: PointCloud2,
+    x: int,
+    y: int,
+    w: int,
+    h: int,
+    height: Union[int, None] = None,
+    width: Union[int, None] = None,
+) -> np.ndarray:
     """
     Computes the centroid of a given bounding box in a pointcloud
     """
+    height = height or pcl.height
+    width = width or pcl.width
+
     # Convert xywh to bounding box coordinates.
     x1, y1, x2, y2 = x, y, x + w, y + h
 
     # cv2 image
-    frame = pcl_to_cv2(pcl)
+    frame = pcl_to_cv2(pcl, height, width)
 
     # Compute mask from bounding box
     mask = np.zeros(shape=frame.shape[:2])
     mask[y1:y2, x1:x2] = 255  # bounding box dimensions
@@ -72,6 +100,7 @@ def bb_to_centroid(pcl: PointCloud2, x: int, y: int, w: int, h: int) -> np.ndarr
 
     # Unpack pointcloud into xyz array
     pcl_xyz = rnp.point_cloud2.pointcloud2_to_xyz_array(pcl, remove_nans=False)
+    pcl_xyz = pcl_xyz.reshape(height, width, 3)
 
     # Extract points of interest
     xyz_points = [pcl_xyz[x][y] for x, y in indices]
diff --git a/common/helpers/navigation_helpers/CMakeLists.txt b/common/helpers/navigation_helpers/CMakeLists.txt
new file mode 100644
index 000000000..457b6155a
--- /dev/null
+++ b/common/helpers/navigation_helpers/CMakeLists.txt
@@ -0,0 +1,202 @@
+cmake_minimum_required(VERSION 3.0.2)
+project(navigation_helpers)
+
+## Compile as C++11, supported in ROS Kinetic and newer
+# add_compile_options(-std=c++11)
+
+## Find catkin macros and libraries
+## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz)
+## is used, also find other catkin packages
+find_package(catkin REQUIRED geometry_msgs)
+
+## System dependencies are found with CMake's conventions
+# find_package(Boost REQUIRED COMPONENTS system)
+
+
+## Uncomment this if the package has a setup.py. This macro ensures
+## modules and global scripts declared therein get installed
+## See http://ros.org/doc/api/catkin/html/user_guide/setup_dot_py.html
+catkin_python_setup()
+
+################################################
+## Declare ROS messages, services and actions ##
+################################################
+
+## To declare and build messages, services or actions from within this
+## package, follow these steps:
+## * Let MSG_DEP_SET be the set of packages whose message types you use in
+##   your messages/services/actions (e.g. std_msgs, actionlib_msgs, ...).
+## * In the file package.xml:
+##   * add a build_depend tag for "message_generation"
+##   * add a build_depend and a exec_depend tag for each package in MSG_DEP_SET
+##   * If MSG_DEP_SET isn't empty the following dependency has been pulled in
+##     but can be declared for certainty nonetheless:
+##     * add a exec_depend tag for "message_runtime"
+## * In this file (CMakeLists.txt):
+##   * add "message_generation" and every package in MSG_DEP_SET to
+##     find_package(catkin REQUIRED COMPONENTS ...)
+##   * add "message_runtime" and every package in MSG_DEP_SET to
+##     catkin_package(CATKIN_DEPENDS ...)
+##   * uncomment the add_*_files sections below as needed
+##     and list every .msg/.srv/.action file to be processed
+##   * uncomment the generate_messages entry below
+##   * add every package in MSG_DEP_SET to generate_messages(DEPENDENCIES ...)
+
+## Generate messages in the 'msg' folder
+# add_message_files(
+#   FILES
+#   Message1.msg
+#   Message2.msg
+# )
+
+## Generate services in the 'srv' folder
+# add_service_files(
+#   FILES
+#   Service1.srv
+#   Service2.srv
+# )
+
+## Generate actions in the 'action' folder
+# add_action_files(
+#   FILES
+#   Action1.action
+#   Action2.action
+# )
+
+## Generate added messages and services with any dependencies listed here
+# generate_messages(
+#   DEPENDENCIES
+#   std_msgs  # Or other packages containing msgs
+# )
+
+################################################
+## Declare ROS dynamic reconfigure parameters ##
+################################################
+
+## To declare and build dynamic reconfigure parameters within this
+## package, follow these steps:
+## * In the file package.xml:
+##   * add a build_depend and a exec_depend tag for "dynamic_reconfigure"
+## * In this file (CMakeLists.txt):
+##   * add "dynamic_reconfigure" to
+##     find_package(catkin REQUIRED COMPONENTS ...)
+##   * uncomment the "generate_dynamic_reconfigure_options" section below
+##     and list every .cfg file to be processed
+
+## Generate dynamic reconfigure parameters in the 'cfg' folder
+# generate_dynamic_reconfigure_options(
+#   cfg/DynReconf1.cfg
+#   cfg/DynReconf2.cfg
+# )
+
+###################################
+## catkin specific configuration ##
+###################################
+## The catkin_package macro generates cmake config files for your package
+## Declare things to be passed to dependent projects
+## INCLUDE_DIRS: uncomment this if your package contains header files
+## LIBRARIES: libraries you create in this project that dependent projects also need
+## CATKIN_DEPENDS: catkin_packages dependent projects also need
+## DEPENDS: system dependencies of this project that dependent projects also need
+catkin_package(
+#  INCLUDE_DIRS include
+#  LIBRARIES navigation_helpers
+#  CATKIN_DEPENDS other_catkin_pkg
+#  DEPENDS system_lib
+)
+
+###########
+## Build ##
+###########
+
+## Specify additional locations of header files
+## Your package locations should be listed before other locations
+include_directories(
+# include
+# ${catkin_INCLUDE_DIRS}
+)
+
+## Declare a C++ library
+# add_library(${PROJECT_NAME}
+#   src/${PROJECT_NAME}/navigation_helpers.cpp
+# )
+
+## Add cmake target dependencies of the library
+## as an example, code may need to be generated before libraries
+## either from message generation or dynamic reconfigure
+# add_dependencies(${PROJECT_NAME} ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS})
+
+## Declare a C++ executable
+## With catkin_make all packages are built within a single CMake context
+## The recommended prefix ensures that target names across packages don't collide
+# add_executable(${PROJECT_NAME}_node src/navigation_helpers_node.cpp)
+
+## Rename C++ executable without prefix
+## The above recommended prefix causes long target names, the following renames the
+## target back to the shorter version for ease of user use
+## e.g. "rosrun someones_pkg node" instead of "rosrun someones_pkg someones_pkg_node"
+# set_target_properties(${PROJECT_NAME}_node PROPERTIES OUTPUT_NAME node PREFIX "")
+
+## Add cmake target dependencies of the executable
+## same as for the library above
+# add_dependencies(${PROJECT_NAME}_node ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS})
+
+## Specify libraries to link a library or executable target against
+# target_link_libraries(${PROJECT_NAME}_node
+#   ${catkin_LIBRARIES}
+# )
+
+#############
+## Install ##
+#############
+
+# all install targets should use catkin DESTINATION variables
+# See http://ros.org/doc/api/catkin/html/adv_user_guide/variables.html
+
+## Mark executable scripts (Python etc.) for installation
+## in contrast to setup.py, you can choose the destination
+# catkin_install_python(PROGRAMS
+#   scripts/my_python_script
+#   DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
+# )
+
+## Mark executables for installation
+## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_executables.html
+# install(TARGETS ${PROJECT_NAME}_node
+#   RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
+# )
+
+## Mark libraries for installation
+## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_libraries.html
+# install(TARGETS ${PROJECT_NAME}
+#   ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
+#   LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
+#   RUNTIME DESTINATION ${CATKIN_GLOBAL_BIN_DESTINATION}
+# )
+
+## Mark cpp header files for installation
+# install(DIRECTORY include/${PROJECT_NAME}/
+#   DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION}
+#   FILES_MATCHING PATTERN "*.h"
+#   PATTERN ".svn" EXCLUDE
+# )
+
+## Mark other files for installation (e.g. launch and bag files, etc.)
+# install(FILES
+#   # myfile1
+#   # myfile2
+#   DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION}
+# )
+
+#############
+## Testing ##
+#############
+
+## Add gtest based cpp test target and link libraries
+# catkin_add_gtest(${PROJECT_NAME}-test test/test_navigation_helpers.cpp)
+# if(TARGET ${PROJECT_NAME}-test)
+#   target_link_libraries(${PROJECT_NAME}-test ${PROJECT_NAME})
+# endif()
+
+## Add folders to be run by python nosetests
+# catkin_add_nosetests(test)
diff --git a/common/helpers/navigation_helpers/package.xml b/common/helpers/navigation_helpers/package.xml
new file mode 100644
index 000000000..7745ec69e
--- /dev/null
+++ b/common/helpers/navigation_helpers/package.xml
@@ -0,0 +1,59 @@
+<?xml version="1.0"?>
+<package format="2">
+  <name>navigation_helpers</name>
+  <version>0.0.0</version>
+  <description>The navigation_helpers package</description>
+
+  <!-- One maintainer tag required, multiple allowed, one person per tag -->
+  <!-- Example:  -->
+  <!-- <maintainer email="jane.doe@example.com">Jane Doe</maintainer> -->
+  <maintainer email="jared@todo.todo">Jared Swift</maintainer>
+
+  <!-- One license tag required, multiple allowed, one license per tag -->
+  <!-- Commonly used license strings: -->
+  <!--   BSD, MIT, Boost Software License, GPLv2, GPLv3, LGPLv2.1, LGPLv3 -->
+  <license>TODO</license>
+
+  <!-- Url tags are optional, but multiple are allowed, one per tag -->
+  <!-- Optional attribute type can be: website, bugtracker, or repository -->
+  <!-- Example: -->
+  <!-- <url type="website">http://wiki.ros.org/navigation_helpers</url> -->
+
+  <!-- Author tags are optional, multiple are allowed, one per tag -->
+  <!-- Authors do not have to be maintainers, but could be -->
+  <!-- Example: -->
+  <!-- <author email="jane.doe@example.com">Jane Doe</author> -->
+
+  <!-- The *depend tags are used to specify dependencies -->
+  <!-- Dependencies can be catkin packages or system dependencies -->
+  <!-- Examples: -->
+  <!-- Use depend as a shortcut for packages that are both build and exec dependencies -->
+  <!--   <depend>roscpp</depend> -->
+  <!--   Note that this is equivalent to the following: -->
+  <!--   <build_depend>roscpp</build_depend> -->
+  <!--   <exec_depend>roscpp</exec_depend> -->
+  <!-- Use build_depend for packages you need at compile time: -->
+  <!--   <build_depend>message_generation</build_depend> -->
+  <!-- Use build_export_depend for packages you need in order to build against this package: -->
+  <!--   <build_export_depend>message_generation</build_export_depend> -->
+  <!-- Use buildtool_depend for build tool packages: -->
+  <!--   <buildtool_depend>catkin</buildtool_depend> -->
+  <!-- Use exec_depend for packages you need at runtime: -->
+  <!--   <exec_depend>message_runtime</exec_depend> -->
+  <!-- Use test_depend for packages you need only for testing: -->
+  <!--   <test_depend>gtest</test_depend> -->
+  <!-- Use doc_depend for packages you need only for building documentation: -->
+  <!--   <doc_depend>doxygen</doc_depend> -->
+  <buildtool_depend>catkin</buildtool_depend>
+
+  <!-- The export tag contains other, unspecified, tags -->
+  <export>
+    <!-- Other tools can request additional information be placed here -->
+
+  </export>
+</package>
diff --git a/common/helpers/navigation_helpers/setup.py b/common/helpers/navigation_helpers/setup.py
new file mode 100644
index 000000000..4cfc6694c
--- /dev/null
+++ b/common/helpers/navigation_helpers/setup.py
@@ -0,0 +1,10 @@
+#!/usr/bin/env python3
+
+from distutils.core import setup
+from catkin_pkg.python_setup import generate_distutils_setup
+
+setup_args = generate_distutils_setup(
+    packages=["navigation_helpers"], package_dir={"": "src"}
+)
+
+setup(**setup_args)
diff --git a/common/helpers/navigation_helpers/src/navigation_helpers/__init__.py b/common/helpers/navigation_helpers/src/navigation_helpers/__init__.py
new file mode 100644
index 000000000..969833b49
--- /dev/null
+++ b/common/helpers/navigation_helpers/src/navigation_helpers/__init__.py
@@ -0,0 +1,29 @@
+from geometry_msgs.msg import (
+    Point,
+    Pose,
+)
+
+import numpy as np
+from itertools import permutations
+
+from typing import Union, List
+
+
+def euclidian_distance(p1: Point, p2: Point) -> float:
+    return ((p1.x - p2.x) ** 2 + (p1.y - p2.y) ** 2) ** 0.5
+
+
+def min_hamiltonian_path(start: Pose, poses: List[Pose]) -> Union[None, List[Pose]]:
+    best_order = None
+    min_distance = np.inf
+
+    for perm in permutations(poses):
+        dist = euclidian_distance(start.position, perm[0].position)
+        for i in range(len(poses) - 1):
+            dist += euclidian_distance(perm[i].position, perm[i + 1].position)
+
+        if dist < min_distance:
+            min_distance = dist
+            best_order = list(perm)
+
+    return best_order
diff --git a/common/vision/lasr_vision_msgs/msg/Detection3D.msg b/common/vision/lasr_vision_msgs/msg/Detection3D.msg
index de41b3451..1a5fdea6e 100644
--- a/common/vision/lasr_vision_msgs/msg/Detection3D.msg
+++ b/common/vision/lasr_vision_msgs/msg/Detection3D.msg
@@ -15,4 +15,5 @@ string target_frame
 # This will be empty if a segmentation model was not used
 int32[] xyseg
 
+# Point in map frame
 geometry_msgs/Point point
\ No newline at end of file
diff --git a/common/vision/lasr_vision_yolov8/examples/relay_3d b/common/vision/lasr_vision_yolov8/examples/relay_3d
index 1ffc9f758..02a243bd5 100644
--- a/common/vision/lasr_vision_yolov8/examples/relay_3d
+++ b/common/vision/lasr_vision_yolov8/examples/relay_3d
@@ -53,4 +53,4 @@ def listener():
 
 
 if __name__ == "__main__":
-    listener()
\ No newline at end of file
+    listener()
diff --git a/common/vision/lasr_vision_yolov8/requirements.txt b/common/vision/lasr_vision_yolov8/requirements.txt
index e25cdeb1e..be2c4b632 100644
--- a/common/vision/lasr_vision_yolov8/requirements.txt
+++ b/common/vision/lasr_vision_yolov8/requirements.txt
@@ -4,10 +4,10 @@ contourpy==1.1.1 # via matplotlib
 cycler==0.12.1 # via matplotlib
 dill==0.3.7 # via -r requirements.in
 filelock==3.13.1 # via torch, triton
-fonttools==4.49.0 # via matplotlib
-fsspec==2024.2.0 # via torch
+fonttools==4.50.0 # via matplotlib
+fsspec==2024.3.0 # via torch
 idna==3.6 # via requests
-importlib-resources==6.1.2 # via matplotlib
+importlib-resources==6.3.1 # via matplotlib
 jinja2==3.1.3 # via torch
 kiwisolver==1.4.5 # via matplotlib
 markupsafe==2.1.5 # via jinja2
@@ -28,7 +28,7 @@ nvidia-nccl-cu12==2.19.3 # via torch
 nvidia-nvjitlink-cu12==12.4.99 # via nvidia-cusolver-cu12, nvidia-cusparse-cu12
 nvidia-nvtx-cu12==12.1.105 # via torch
 opencv-python==4.9.0.80 # via ultralytics
-packaging==23.2 # via matplotlib
+packaging==24.0 # via matplotlib
 pandas==2.0.3 # via seaborn, ultralytics
 pillow==10.2.0 # via matplotlib, torchvision, ultralytics
 psutil==5.9.8 # via ultralytics
@@ -50,4 +50,4 @@ typing-extensions==4.10.0 # via torch
 tzdata==2024.1 # via pandas
 ultralytics==8.0.168 # via -r requirements.in
 urllib3==2.2.1 # via requests
-zipp==3.17.0 # via importlib-resources
+zipp==3.18.1 # via importlib-resources
diff --git a/common/vision/lasr_vision_yolov8/src/lasr_vision_yolov8/yolo.py b/common/vision/lasr_vision_yolov8/src/lasr_vision_yolov8/yolo.py
index 342223979..4171de2de 100644
--- a/common/vision/lasr_vision_yolov8/src/lasr_vision_yolov8/yolo.py
+++ b/common/vision/lasr_vision_yolov8/src/lasr_vision_yolov8/yolo.py
@@ -18,7 +18,8 @@ from geometry_msgs.msg import Point, PointStamped
 
 import tf2_ros as tf
-import tf2_geometry_msgs  # noqa
+import tf2_sensor_msgs  # noqa
+from tf2_sensor_msgs.tf2_sensor_msgs import do_transform_cloud
 
 # global tf buffer
 tf_buffer = tf.Buffer(cache_time=rospy.Duration(10))
@@ -108,6 +109,12 @@ def detect_3d(
     rospy.loginfo("Decoding")
     img = cv2_pcl.pcl_to_cv2(request.pcl)
 
+    # transform pcl to map frame
+    trans = tf_buffer.lookup_transform(
+        "map", request.pcl.header.frame_id, rospy.Time(0), rospy.Duration(1.0)
+    )
+    pcl_map = do_transform_cloud(request.pcl, trans)
+
     # load model
     rospy.loginfo("Loading model")
     model = load_model(request.dataset)
@@ -132,26 +139,19 @@ def detect_3d(
         if has_segment_masks:
             detection.xyseg = result.masks.xy[i].flatten().astype(int).tolist()
 
-        centroid = cv2_pcl.seg_to_centroid(request.pcl, np.array(detection.xyseg))
-
-        point_stamped = PointStamped()
-        point_stamped.point = Point(*centroid)
-        point_stamped.header.frame_id = request.pcl.header.frame_id
-        point_stamped.header.stamp = rospy.Time(0)
-
-        # TODO: handle tf errors properly
-        while not rospy.is_shutdown():
-            try:
-                point_stamped = tf_buffer.transform(point_stamped, "map")
-                detection.point = point_stamped.point
-                break
-            except Exception as e:
-                rospy.logerr(e)
-                continue
-
-        # publish to debug topic
-        if debug_point_publisher is not None:
-            markers.create_and_publish_marker(debug_point_publisher, point_stamped)
+        centroid = cv2_pcl.seg_to_centroid(
+            pcl_map,
+            np.array(detection.xyseg),
+            height=request.pcl.height,
+            width=request.pcl.width,
+        )
+        detection.point = Point(*centroid)
+
+        if debug_point_publisher is not None:
+            markers.create_and_publish_marker(
+                debug_point_publisher,
+                PointStamped(point=detection.point, header=pcl_map.header),
+            )
 
         detected_objects.append(detection)
diff --git a/skills/src/lasr_skills/__init__.py b/skills/src/lasr_skills/__init__.py
index abb498c39..4a0fd04c0 100755
--- a/skills/src/lasr_skills/__init__.py
+++ b/skills/src/lasr_skills/__init__.py
@@ -2,15 +2,18 @@
 from .detect_3d import Detect3D
 from .detect_3d_in_area import Detect3DInArea
 from .wait_for_person import WaitForPerson
+from .say import Say
 from .wait_for_person_in_area import WaitForPersonInArea
 from .describe_people import DescribePeople
 from .look_to_point import LookToPoint
 from .play_motion import PlayMotion
 from .go_to_location import GoToLocation
-from .go_to_semantic_location import GoToSemanticLocation
-from .say import Say
 from .listen import Listen
 from .listen_for import ListenFor
+from .ask_and_listen import AskAndListen
+from .find_person import FindPerson
+from .find_named_person import FindNamedPerson
+from .go_to_semantic_location import GoToSemanticLocation
 from .receive_object import ReceiveObject
 from .handover_object import HandoverObject
 from .ask_and_listen import AskAndListen
diff --git a/skills/src/lasr_skills/find_named_person.py b/skills/src/lasr_skills/find_named_person.py
new file mode 100755
index 000000000..30258cf54
--- /dev/null
+++ b/skills/src/lasr_skills/find_named_person.py
@@ -0,0 +1,177 @@
+import smach
+import rospy
+
+from lasr_skills import Detect3D, GoToLocation, AskAndListen
+import navigation_helpers
+
+from geometry_msgs.msg import Pose, PoseWithCovarianceStamped
+
+
+from typing import List
+
+
+class FindNamedPerson(smach.StateMachine):
+
+    class GetLocation(smach.State):
+        def __init__(self):
+            smach.State.__init__(
+                self,
+                outcomes=["succeeded", "failed"],
+                input_keys=["location_index", "waypoints"],
+                output_keys=["location"],
+            )
+
+        def execute(self, userdata):
+            userdata.location = userdata.waypoints[userdata.location_index]
+            return "succeeded"
+
+    class GetPose(smach.State):
+        def __init__(self):
+            smach.State.__init__(
+                self,
+                outcomes=["succeeded", "failed"],
+                output_keys=["current_pose"],
+            )
+
+        def execute(self, userdata):
+            userdata.current_pose = rospy.wait_for_message(
+                "/amcl_pose", PoseWithCovarianceStamped
+            ).pose.pose
+            return "succeeded"
+
+    class ComputePath(smach.State):
+        def __init__(self, waypoints: List[Pose]):
+            smach.State.__init__(
+                self,
+                outcomes=["succeeded", "failed"],
+                input_keys=["current_pose"],
+                output_keys=["waypoints"],
+            )
+            self._waypoints = waypoints
+
+        def execute(self, userdata):
+            userdata.waypoints = navigation_helpers.min_hamiltonian_path(
+                userdata.current_pose, self._waypoints
+            )
+            return "succeeded"
+
+    class HandleDetections(smach.State):
+        def __init__(self):
+            smach.State.__init__(
+                self,
+                outcomes=["succeeded", "failed"],
+                input_keys=["detections_3d"],
+                output_keys=["person_point"],
+            )
+
+        def execute(self, userdata):
+            if len(userdata.detections_3d.detected_objects) == 0:
+                rospy.logwarn("No person detected, returning failed.")
+                return "failed"
+            userdata.person_point = userdata.detections_3d.detected_objects[0].point
+            return "succeeded"
+
+    class HandleResponse(smach.State):
+        def __init__(self):
+            smach.State.__init__(
+                self,
+                outcomes=["succeeded", "failed"],
+                input_keys=["transcribed_speech"],
+            )
+
+        def execute(self, userdata):
+            # TODO: make this smarter, e.g. Levenshtein distance
+            if "yes" in userdata.transcribed_speech.lower():
+                return "succeeded"
+            return "failed"
+
+    def __init__(self, waypoints: List[Pose], name: str):
+        smach.StateMachine.__init__(
+            self, outcomes=["succeeded", "failed"], output_keys=["person_point"]
+        )
+
+        with self:
+
+            smach.StateMachine.add(
+                "GET_POSE",
+                self.GetPose(),
+                transitions={"succeeded": "COMPUTE_PATH", "failed": "failed"},
+            )
+
+            smach.StateMachine.add(
+                "COMPUTE_PATH",
+                self.ComputePath(waypoints),
+                transitions={"succeeded": "WAYPOINT_ITERATOR", "failed": "failed"},
+            )
+
+            waypoint_iterator = smach.Iterator(
+                outcomes=["succeeded", "failed"],
+                it=lambda: range(len(waypoints)),
+                it_label="location_index",
+                input_keys=["waypoints"],
+                output_keys=["person_point"],
+                exhausted_outcome="failed",
+            )
+
+            with waypoint_iterator:
+
+                container_sm = smach.StateMachine(
+                    outcomes=["succeeded", "failed", "continue"],
+                    input_keys=["location_index", "waypoints"],
+                    output_keys=["person_point"],
+                )
+
+                with container_sm:
+
+                    smach.StateMachine.add(
+                        "GET_LOCATION",
+                        self.GetLocation(),
+                        transitions={"succeeded": "GO_TO_LOCATION", "failed": "failed"},
+                    )
+
+                    smach.StateMachine.add(
+                        "GO_TO_LOCATION",
+                        GoToLocation(),
+                        transitions={
+                            "succeeded": "DETECT3D",
+                            "failed": "failed",
+                        },
+                    )
+                    smach.StateMachine.add(
+                        "DETECT3D",
+                        Detect3D(filter=["person"]),
+                        transitions={
+                            "succeeded": "HANDLE_DETECTIONS",
+                            "failed": "failed",
+                        },
+                    )
+                    smach.StateMachine.add(
+                        "HANDLE_DETECTIONS",
+                        self.HandleDetections(),
+                        transitions={
+                            "succeeded": "CHECK_NAME",
+                            "failed": "continue",
+                        },
+                    )
+                    smach.StateMachine.add(
+                        "CHECK_NAME",
+                        AskAndListen(f"I'm looking for {name}. Are you {name}?"),
+                        transitions={
+                            "succeeded": "HANDLE_RESPONSE",
+                            "failed": "failed",
+                        },
+                    )
+                    smach.StateMachine.add(
+                        "HANDLE_RESPONSE",
+                        self.HandleResponse(),
+                        transitions={"succeeded": "succeeded", "failed": "continue"},
+                    )
+
+                waypoint_iterator.set_contained_state(
+                    "CONTAINER_STATE", container_sm, loop_outcomes=["continue"]
+                )
+            smach.StateMachine.add(
+                "WAYPOINT_ITERATOR",
+                waypoint_iterator,
+                {"succeeded": "succeeded", "failed": "failed"},
+            )
diff --git a/skills/src/lasr_skills/find_person.py b/skills/src/lasr_skills/find_person.py
new file mode 100755
index 000000000..94dadccc1
--- /dev/null
+++ b/skills/src/lasr_skills/find_person.py
@@ -0,0 +1,149 @@
+import smach
+import rospy
+
+from lasr_skills import Detect3D, GoToLocation
+import navigation_helpers
+
+from geometry_msgs.msg import Pose, PoseWithCovarianceStamped
+
+from typing import List
+
+
+class FindPerson(smach.StateMachine):
+
+    class GetLocation(smach.State):
+        def __init__(self):
+            smach.State.__init__(
+                self,
+                outcomes=["succeeded", "failed"],
+                input_keys=["location_index", "waypoints"],
+                output_keys=["location"],
+            )
+
+        def execute(self, userdata):
+            userdata.location = userdata.waypoints[userdata.location_index]
+            return "succeeded"
+
+    class GetPose(smach.State):
+        def __init__(self):
+            smach.State.__init__(
+                self,
+                outcomes=["succeeded", "failed"],
+                output_keys=["current_pose"],
+            )
+
+        def execute(self, userdata):
+            userdata.current_pose = rospy.wait_for_message(
+                "/amcl_pose", PoseWithCovarianceStamped
+            ).pose.pose
+            return "succeeded"
+
+    class ComputePath(smach.State):
+        def __init__(self, waypoints: List[Pose]):
+            smach.State.__init__(
+                self,
+                outcomes=["succeeded", "failed"],
+                input_keys=["current_pose"],
+                output_keys=["waypoints"],
+            )
+            self._waypoints = waypoints
+
+        def execute(self, userdata):
+            userdata.waypoints = navigation_helpers.min_hamiltonian_path(
+                userdata.current_pose, self._waypoints
+            )
+            return "succeeded"
+
+    class HandleDetections(smach.State):
+        def __init__(self):
+            smach.State.__init__(
+                self,
+                outcomes=["succeeded", "failed"],
+                input_keys=["detections_3d"],
+                output_keys=["person_point"],
+            )
+
+        def execute(self, userdata):
+            if len(userdata.detections_3d.detected_objects) == 0:
+                rospy.logwarn("No person detected, returning failed.")
+                return "failed"
+            userdata.person_point = userdata.detections_3d.detected_objects[0].point
+            return "succeeded"
+
+    def __init__(self, waypoints: List[Pose]):
+        smach.StateMachine.__init__(
+            self, outcomes=["succeeded", "failed"], output_keys=["person_point"]
+        )
+
+        with self:
+
+            smach.StateMachine.add(
+                "GET_POSE",
+                self.GetPose(),
+                transitions={"succeeded": "COMPUTE_PATH", "failed": "failed"},
+            )
+
+            smach.StateMachine.add(
+                "COMPUTE_PATH",
+                self.ComputePath(waypoints),
+                transitions={"succeeded": "WAYPOINT_ITERATOR", "failed": "failed"},
+            )
+
+            waypoint_iterator = smach.Iterator(
+                outcomes=["succeeded", "failed"],
+                it=lambda: range(len(waypoints)),
+                it_label="location_index",
+                input_keys=["waypoints"],
+                output_keys=["person_point"],
+                exhausted_outcome="failed",
+            )
+
+            with waypoint_iterator:
+
+                container_sm = smach.StateMachine(
+                    outcomes=["succeeded", "failed", "continue"],
+                    input_keys=["location_index", "waypoints"],
+                    output_keys=["person_point"],
+                )
+
+                with container_sm:
+
+                    smach.StateMachine.add(
+                        "GET_LOCATION",
+                        self.GetLocation(),
+                        transitions={"succeeded": "GO_TO_LOCATION", "failed": "failed"},
+                    )
+
+                    smach.StateMachine.add(
+                        "GO_TO_LOCATION",
+                        GoToLocation(),
+                        transitions={
+                            "succeeded": "DETECT3D",
+                            "failed": "failed",
+                        },
+                    )
+                    smach.StateMachine.add(
+                        "DETECT3D",
+                        Detect3D(filter=["person"]),
+                        transitions={
+                            "succeeded": "HANDLE_DETECTIONS",
+                            "failed": "failed",
+                        },
+                    )
+                    smach.StateMachine.add(
+                        "HANDLE_DETECTIONS",
+                        self.HandleDetections(),
+                        transitions={
+                            "succeeded": "succeeded",
+                            "failed": "continue",
+                        },
+                    )
+
+                waypoint_iterator.set_contained_state(
+                    "CONTAINER_STATE", container_sm, loop_outcomes=["continue"]
+                )
+            smach.StateMachine.add(
+                "WAYPOINT_ITERATOR",
+                waypoint_iterator,
+                {"succeeded": "succeeded", "failed": "failed"},
+            )
diff --git a/skills/src/lasr_skills/go_to_location.py b/skills/src/lasr_skills/go_to_location.py
index 562aeaf61..4a4320417 100755
--- a/skills/src/lasr_skills/go_to_location.py
+++ b/skills/src/lasr_skills/go_to_location.py
@@ -26,7 +26,13 @@ class GoToLocation(smach.StateMachine):
 
     def __init__(self, location: Union[Pose, None] = None):
-        super(GoToLocation, self).__init__(outcomes=["succeeded", "failed"])
+
+        if location is not None:
+            super(GoToLocation, self).__init__(outcomes=["succeeded", "failed"])
+        else:
+            super(GoToLocation, self).__init__(
+                outcomes=["succeeded", "failed"], input_keys=["location"]
+            )
 
         IS_SIMULATION = (
             "/pal_startup_control/start" not in rosservice.get_service_list()
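
Usage sketch (illustrative, not part of this patch): the new
navigation_helpers.min_hamiltonian_path helper brute-forces every permutation
of the given poses, so it grows factorially and is only suitable for a small
number of waypoints; the patch uses it to order search locations by total 2D
travel distance. The coordinates below are made up, and a sourced workspace is
assumed so that geometry_msgs and navigation_helpers are importable:

    from geometry_msgs.msg import Point, Pose

    from navigation_helpers import min_hamiltonian_path

    start = Pose(position=Point(x=0.0, y=0.0, z=0.0))
    waypoints = [
        Pose(position=Point(x=5.0, y=5.0, z=0.0)),
        Pose(position=Point(x=1.0, y=0.0, z=0.0)),
        Pose(position=Point(x=2.0, y=1.0, z=0.0)),
    ]

    # Returns the ordering that minimises total straight-line travel from
    # `start`; here that is the near-to-far order (1,0) -> (2,1) -> (5,5).
    ordered = min_hamiltonian_path(start, waypoints)
    for pose in ordered:
        print(pose.position.x, pose.position.y)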
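Usage sketch (illustrative, not part of this patch): FindNamedPerson can be
run as a standalone state machine. The node name, waypoint poses, and person
name below are hypothetical, and the full robot stack (move_base/AMCL, the
YOLOv8 service, and the speech pipeline behind AskAndListen) is assumed to be
running:

    import rospy
    from geometry_msgs.msg import Point, Pose, Quaternion

    from lasr_skills import FindNamedPerson

    rospy.init_node("find_named_person_demo")

    # Candidate poses to search from, e.g. one per room.
    waypoints = [
        Pose(position=Point(x=1.0, y=0.0, z=0.0), orientation=Quaternion(w=1.0)),
        Pose(position=Point(x=3.0, y=2.0, z=0.0), orientation=Quaternion(w=1.0)),
    ]

    sm = FindNamedPerson(waypoints=waypoints, name="Jared")
    outcome = sm.execute()
    if outcome == "succeeded":
        # person_point is the detection centroid, already in the map frame
        rospy.loginfo(f"Found them at {sm.userdata.person_point}")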
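Usage sketch (illustrative, not part of this patch): after the go_to_location.py
change, GoToLocation has two construction modes — a pose fixed at construction
time, or no argument, in which case the skill declares input_keys=["location"]
and reads its target from userdata (this is how the FindPerson waypoint loop
drives it). The pose below is made up:

    from geometry_msgs.msg import Point, Pose, Quaternion

    from lasr_skills import GoToLocation

    # Fixed target: no input keys are declared on the state machine.
    fixed = GoToLocation(
        Pose(position=Point(x=1.0, y=0.0, z=0.0), orientation=Quaternion(w=1.0))
    )

    # Dynamic target: the parent state machine must supply a Pose under the
    # "location" userdata key (or remap another key onto it).
    dynamic = GoToLocation()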