Skip to content

Commit

Permalink
Merge pull request #20 from AlexanderKhazatsky/main
Browse files Browse the repository at this point in the history
Depth conversion + jerking fix
  • Loading branch information
kpertsch authored Oct 1, 2024
2 parents ca54513 + ad3398d commit 13413ca
Show file tree
Hide file tree
Showing 6 changed files with 598 additions and 125 deletions.
13 changes: 12 additions & 1 deletion droid/franka/robot.py
Original file line number Diff line number Diff line change
Expand Up @@ -37,6 +37,7 @@ def launch_robot(self):
self._gripper = GripperInterface(ip_address="localhost")
self._max_gripper_width = self._gripper.metadata.max_width
self._ik_solver = RobotIKSolver()
self._controller_not_loaded = False

def kill_controller(self):
self._robot_process.kill()
Expand Down Expand Up @@ -84,7 +85,16 @@ def update_joints(self, command, velocity=False, blocking=False, cartesian_noise

def helper_non_blocking():
if not self._robot.is_running_policy():
self._controller_not_loaded = True
self._robot.start_cartesian_impedance()
timeout = time.time() + 5
while not self._robot.is_running_policy():
time.sleep(0.01)
if time.time() > timeout:
self._robot.start_cartesian_impedance()
timeout = time.time() + 5

self._controller_not_loaded = False
try:
self._robot.update_desired_joint_positions(command)
except grpc.RpcError:
Expand All @@ -101,7 +111,8 @@ def helper_non_blocking():

self._robot.start_cartesian_impedance()
else:
run_threaded_command(helper_non_blocking)
if not self._controller_not_loaded:
run_threaded_command(helper_non_blocking)

def update_gripper(self, command, velocity=True, blocking=False):
if velocity:
Expand Down
26 changes: 26 additions & 0 deletions droid/postprocessing/parse.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,8 +8,10 @@
from datetime import datetime
from pathlib import Path
from typing import Dict, Optional, Tuple
import os

import h5py
import json

from droid.postprocessing.schema import TRAJECTORY_SCHEMA

Expand Down Expand Up @@ -42,6 +44,30 @@ def parse_user(
# Invalid/Incomplete HDF5 File --> return invalid :: (None, None)
return None, None

def parse_existing_metadata(trajectory_dir: str):
    """Return the previously-written JSON metadata record for a trajectory, if any.

    Scans the files directly inside `trajectory_dir` and loads the first file whose
    *name* contains "metadata" as JSON.

    Args:
        trajectory_dir: Path to a single trajectory directory.

    Returns:
        The parsed metadata dict, or None when no metadata file is present.
    """
    dir_path = os.path.abspath(trajectory_dir)
    for entry in os.scandir(dir_path):
        # Match on the filename only; matching on the full path (as the original did)
        # spuriously matches *every* file when a parent directory contains "metadata",
        # which then crashes json.load on arbitrary non-JSON files.
        if entry.is_file() and "metadata" in entry.name:
            with open(entry.path, encoding="utf-8") as json_file:
                return json.load(json_file)
    return None

def parse_data_directory(data_dir: Path, lab_agnostic: bool = False, process_failures: bool = False):
    """Enumerate the outcome directories to index beneath `data_dir`.

    Args:
        data_dir: Root data directory as a `pathlib.Path` (the previous `str`
            annotation was wrong -- the body uses `Path.iterdir` and `/`).
        lab_agnostic: When True, treat each immediate subdirectory of `data_dir`
            as a per-lab root and enumerate outcomes under each of them.
        process_failures: When True, also include each root's "failure"
            directory in addition to "success".

    Returns:
        List of `(outcome_dir, outcome_name)` tuples with `outcome_name` in
        {"success", "failure"}. Paths are not checked for existence; callers
        are expected to skip missing directories.
    """
    lab_roots = [p for p in data_dir.iterdir() if p.is_dir()] if lab_agnostic else [data_dir]

    outcomes = ["success"] + (["failure"] if process_failures else [])
    paths_to_index = []
    for root in lab_roots:
        for outcome in outcomes:
            outcome_dir = root / outcome
            paths_to_index.append((outcome_dir, outcome_dir.name))

    return paths_to_index


def parse_timestamp(trajectory_dir: Path) -> str:
for pattern in [
Expand Down
117 changes: 88 additions & 29 deletions droid/postprocessing/stages.py
Original file line number Diff line number Diff line change
Expand Up @@ -26,8 +26,9 @@
import boto3
from tqdm import tqdm

from droid.postprocessing.parse import parse_datetime, parse_timestamp, parse_trajectory, parse_user
from droid.postprocessing.parse import parse_datetime, parse_timestamp, parse_trajectory, parse_user, parse_existing_metadata, parse_data_directory
from droid.postprocessing.util.svo2mp4 import convert_mp4s
from droid.postprocessing.util.svo2depth import convert_depths
from droid.postprocessing.util.validate import validate_day_dir, validate_metadata_record, validate_svo_existence


Expand All @@ -42,10 +43,16 @@ def run_indexing(
scanned_paths: Dict[str, Dict[str, str]],
indexed_uuids: Dict[str, Dict[str, str]],
errored_paths: Dict[str, Dict[str, str]],
search_existing_metadata: bool = False,
lab_agnostic: bool = False,
process_failures: bool = True,
) -> None:
"""Index data by iterating through each "success/ | failure/" --> <DAY>/ --> <TIMESTAMP>/ (specified trajectory)."""
progress = tqdm(desc="[*] Stage 1 =>> Indexing")
for outcome_dir, outcome in [(p, p.name) for p in [data_dir / "success", data_dir / "failure"]]:

paths_to_index = parse_data_directory(data_dir, lab_agnostic=lab_agnostic, process_failures=process_failures)

for outcome_dir, outcome in paths_to_index:
if outcome == "failure" and not outcome_dir.exists():
# Note: Some labs don't have failure trajectories...
continue
Expand All @@ -59,8 +66,22 @@ def run_indexing(
rel_trajectory_dir = str(trajectory_dir.relative_to(data_dir))

# Extract Timestamp (from `trajectory_dir`) and User, User ID (from `trajectory.h5`)
timestamp = parse_timestamp(trajectory_dir)
user, user_id = parse_user(trajectory_dir, aliases, members)
existing_metadata_found = False
if search_existing_metadata:
metadata = parse_existing_metadata(trajectory_dir)
if metadata is not None:
timestamp = metadata['timestamp']
user = metadata['user']
user_id = metadata['user_id']
uuid = metadata['uuid']
existing_metadata_found = True
if not (search_existing_metadata and existing_metadata_found):
timestamp = parse_timestamp(trajectory_dir)
user, user_id = parse_user(trajectory_dir, aliases, members)

# Create Trajectory UUID --> <LAB>+<USER_ID>+YYYY-MM-DD-{24 Hour}h-{Min}m-{Sec}s
uuid = f"{lab}+{user_id}+{timestamp}"

if user is None or user_id is None:
scanned_paths[outcome][rel_trajectory_dir] = True
errored_paths[outcome][rel_trajectory_dir] = (
Expand All @@ -72,9 +93,6 @@ def run_indexing(
progress.update()
continue

# Create Trajectory UUID --> <LAB>+<USER_ID>+YYYY-MM-DD-{24 Hour}h-{Min}m-{Sec}s
uuid = f"{lab}+{user_id}+{timestamp}"

# Verify SVO Files
if not validate_svo_existence(trajectory_dir):
scanned_paths[outcome][rel_trajectory_dir] = True
Expand Down Expand Up @@ -108,6 +126,11 @@ def run_processing(
processed_uuids: Dict[str, Dict[str, str]],
errored_paths: Dict[str, Dict[str, str]],
process_batch_limit: int = 250,
search_existing_metadata: bool = False,
extract_MP4_data: bool = True,
extract_depth_data: bool = False,
depth_resolution: tuple = (0,0),
depth_frequency: int = 1,
) -> None:
"""Iterate through each trajectory in `indexed_uuids` and 1) extract JSON metadata and 2) convert SVO -> MP4."""
for outcome in indexed_uuids:
Expand All @@ -117,36 +140,72 @@ def run_processing(
continue

trajectory_dir = data_dir / rel_trajectory_dir
timestamp = parse_timestamp(trajectory_dir)
user, user_id = parse_user(trajectory_dir, aliases, members)

existing_metadata_found = False
if search_existing_metadata:
metadata = parse_existing_metadata(trajectory_dir)
if metadata is not None:
metadata_record = metadata
timestamp = metadata['timestamp']
user = metadata['user']
user_id = metadata['user_id']
uuid = metadata['uuid']
existing_metadata_found = True
valid_parse = True
if not (search_existing_metadata and existing_metadata_found):
timestamp = parse_timestamp(trajectory_dir)
user, user_id = parse_user(trajectory_dir, aliases, members)

# Run Metadata Extraction --> JSON-serializable Data Record + Validation
valid_parse, metadata_record = parse_trajectory(
data_dir, trajectory_dir, uuid, lab, user, user_id, timestamp
)
# Run Metadata Extraction --> JSON-serializable Data Record + Validation
valid_parse, metadata_record = parse_trajectory(
data_dir, trajectory_dir, uuid, lab, user, user_id, timestamp
)
if not valid_parse:
errored_paths[outcome][rel_trajectory_dir] = "[Processing Error] JSON Metadata Parse Error"
totals["errored"][outcome] = len(errored_paths[outcome])
continue

# Convert SVOs --> MP4s
valid_convert, vid_paths = convert_mp4s(
data_dir,
trajectory_dir,
metadata_record["wrist_cam_serial"],
metadata_record["ext1_cam_serial"],
metadata_record["ext2_cam_serial"],
metadata_record["ext1_cam_extrinsics"],
metadata_record["ext2_cam_extrinsics"],
)
if not valid_convert:
errored_paths[outcome][rel_trajectory_dir] = "[Processing Error] Corrupted SVO / Failed Conversion"
totals["errored"][outcome] = len(errored_paths[outcome])
continue
if extract_MP4_data:
valid_convert, vid_paths = convert_mp4s(
data_dir,
trajectory_dir,
metadata_record["wrist_cam_serial"],
metadata_record["ext1_cam_serial"],
metadata_record["ext2_cam_serial"],
metadata_record["ext1_cam_extrinsics"],
metadata_record["ext2_cam_extrinsics"],
)
if not valid_convert:
errored_paths[outcome][rel_trajectory_dir] = "[Processing Error] Corrupted SVO / Failed Conversion"
totals["errored"][outcome] = len(errored_paths[outcome])
continue

# Extend Metadata Record
for key, vid_path in vid_paths.items():
metadata_record[key] = vid_path

# Convert SVOs --> Depth
if extract_depth_data:
valid_convert, vid_paths = convert_depths(
data_dir,
trajectory_dir,
metadata_record["wrist_cam_serial"],
metadata_record["ext1_cam_serial"],
metadata_record["ext2_cam_serial"],
metadata_record["ext1_cam_extrinsics"],
metadata_record["ext2_cam_extrinsics"],
resolution=depth_resolution,
frequency=depth_frequency,
)
if not valid_convert:
errored_paths[outcome][rel_trajectory_dir] = "[Processing Error] Corrupted SVO / Failed Conversion"
totals["errored"][outcome] = len(errored_paths[outcome])
continue

# Finalize Metadata Record
for key, vid_path in vid_paths.items():
metadata_record[key] = vid_path
# Extend Metadata Record
for key, vid_path in vid_paths.items():
metadata_record[key] = vid_path

# Validate
if not validate_metadata_record(metadata_record):
Expand Down
139 changes: 139 additions & 0 deletions droid/postprocessing/util/svo2depth.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,139 @@
"""
svo2depth.py

Utility scripts for using the ZED Python SDK to convert raw `.svo` files to per-frame depth maps,
exported as 16-bit PNG images (one folder of PNGs per camera serial).
"""
import os
import subprocess
from pathlib import Path
from typing import Dict, List, Optional, Tuple

import cv2
import numpy as np
import pyzed.sl as sl
from tqdm import tqdm
import imageio

def export_depth(
    svo_file: Path,
    depth_dir: Path,
    stereo_view: str = "left",
    resolution: tuple = (0, 0),
    frequency: int = 1,
    show_progress: bool = False,
) -> bool:
    """Read an SVO file and dump its depth frames as 16-bit PNGs; supports ZED SDK 3.8.* and 4.0.* ONLY.

    Depth is retrieved via the SDK (millimeter units), scaled by 1000, cast to uint16,
    and written as `<svo_position>.png` under `depth_dir/<svo stem>/`. Only every
    `frequency`-th frame is extracted.

    Args:
        svo_file: Path to the input `.svo` recording.
        depth_dir: Parent directory; a per-camera subfolder named after the SVO stem is created.
        stereo_view: "left" or "right" -- which eye's depth measure to export.
        resolution: (width, height) passed to `sl.Resolution`; (0, 0) keeps the SDK default.
        frequency: Extract every Nth frame (1 = every frame).
        show_progress: Display a tqdm progress bar.

    Returns:
        True on success; False if the SVO cannot be opened or a frame grab fails
        with an unexpected error code.
    """
    # Validate the requested view *before* it is used to select the depth measure
    # (the original asserted only after already branching on `stereo_view`).
    assert stereo_view in {"left", "right"}, f"Invalid View to Export `{stereo_view}`!"

    # Create Depth Folder
    depth_out = depth_dir / f"{svo_file.stem}"
    os.makedirs(depth_out, exist_ok=True)

    # Only ZED SDK 3.8 / 4.0 are supported; 4.0 changes end-of-file semantics (see below).
    sdk_version = sl.Camera().get_sdk_version()
    if not (sdk_version.startswith("4.0") or sdk_version.startswith("3.8")):
        # Fix: the original message named `export_mp4` (copy-paste from svo2mp4.py).
        raise ValueError("Function `export_depth` only supports ZED SDK 3.8 OR 4.0; if you see this, contact Sidd!")
    use_sdk_4 = sdk_version.startswith("4.0")

    # Configure PyZED --> set mostly from SVO Path, don't convert in realtime!
    initial_parameters = sl.InitParameters()
    initial_parameters.set_from_svo_file(str(svo_file))
    initial_parameters.svo_real_time_mode = False
    initial_parameters.coordinate_units = sl.UNIT.MILLIMETER
    initial_parameters.camera_image_flip = sl.FLIP_MODE.OFF
    if stereo_view == "right":
        initial_parameters.enable_right_side_measure = True
        depth_measure = sl.MEASURE.DEPTH_RIGHT
    else:
        depth_measure = sl.MEASURE.DEPTH

    # Create ZED Camera Object & Open SVO File
    zed = sl.Camera()
    err = zed.open(initial_parameters)
    if err != sl.ERROR_CODE.SUCCESS:
        zed.close()
        return False

    # Reading resolution and reusable image container.
    depth_resolution = sl.Resolution(*resolution)
    img_container = sl.Mat()

    # SVO Export
    n_frames, rt_parameters = zed.get_svo_number_of_frames(), sl.RuntimeParameters()
    pbar = tqdm(total=n_frames, desc=" => Exporting SVO Frames", leave=False) if show_progress else None

    # Read & Transcode all Frames
    success = True
    while True:
        grabbed = zed.grab(rt_parameters)

        # [NOTE SDK SEMANTICS] --> ZED SDK 4.0 introduces `sl.ERROR_CODE.END_OF_SVOFILE_REACHED`
        end_of_file = use_sdk_4 and (grabbed == sl.ERROR_CODE.END_OF_SVOFILE_REACHED)
        if (grabbed == sl.ERROR_CODE.SUCCESS) or end_of_file:
            svo_position = zed.get_svo_position()
            if svo_position % frequency == 0:
                zed.retrieve_measure(img_container, depth_measure, resolution=depth_resolution)
                # Depth is float data in millimeters; x1000 then uint16 before writing.
                # NOTE(review): with MILLIMETER units this stores micrometers -- confirm intended scale.
                processed_depth = (img_container.get_data() * 1000).astype(np.uint16)
                imageio.imwrite(depth_out / "{0}.png".format(svo_position), processed_depth)

            # Update Progress
            if pbar is not None:
                pbar.update()

            # [NOTE SDK SEMANTICS] --> Check if we've reached the end of the video
            if (svo_position >= (n_frames - 1)) or end_of_file:
                break
        else:
            # Fix: the original looped forever on any other grab error code (e.g. a
            # corrupted frame mid-file); treat it as a failed conversion instead.
            success = False
            break

    # Cleanup & Return
    zed.close()
    if pbar is not None:
        pbar.close()

    return success


def convert_depths(
    data_dir: Path,
    demo_dir: Path,
    wrist_serial: str,
    ext1_serial: str,
    ext2_serial: str,
    ext1_extrinsics: List[float],
    ext2_extrinsics: List[float],
    resolution: tuple,
    frequency: int,
    do_fuse: bool = False,
) -> Tuple[bool, Optional[Dict[str, str]]]:
    """Export per-frame depth PNGs for each `<serial>.svo` in the demo's SVO folder.

    Writes depth output under `<demo_dir>/recordings/Depth/<serial>/` and returns
    path entries (relative to `data_dir`) for the trajectory's metadata record.

    Args:
        data_dir: Root data directory (record paths are stored relative to it).
        demo_dir: Trajectory directory containing `recordings/SVO/`.
        wrist_serial / ext1_serial / ext2_serial: Camera serial numbers.
        ext1_extrinsics / ext2_extrinsics: 6-dim [pos; rot] camera extrinsics.
        resolution: (width, height) forwarded to `export_depth`.
        frequency: Extract every Nth frame; forwarded to `export_depth`.
        do_fuse: Unused; kept for signature parity with `convert_mp4s`.

    Returns:
        (True, record_paths) on success, (False, None) if any export fails.
    """
    svo_path, depth_path = demo_dir / "recordings" / "SVO", demo_dir / "recordings" / "Depth"
    os.makedirs(depth_path, exist_ok=True)
    for svo_file in svo_path.iterdir():
        # Fix: skip stray non-SVO entries (hidden files, partial downloads) that
        # would otherwise be fed to the ZED SDK and abort the whole conversion.
        if not svo_file.is_file() or svo_file.suffix.lower() != ".svo":
            continue
        successful_convert = export_depth(svo_file, depth_path, resolution=resolution, frequency=frequency, show_progress=True)
        if not successful_convert:
            return False, None

    # Associate Ext1 / Ext2 with left/right positions relative to the robot base; use computed extrinsics.
    # => Extrinsics are saved as a 6-dim vector of [pos; rot] where:
    #    - `pos` is (x, y, z) offset --> moving left of robot is +y, moving right is -y
    #    - `rot` is rotation offset as Euler (`R.from_matrix(rmat).as_euler("xyz")`)
    # => Therefore we can compute `left = ext1_serial if ext1_extrinsics[1] > ext2_extrinsics[1]`
    ext1_y, ext2_y = ext1_extrinsics[1], ext2_extrinsics[1]
    left_serial = ext1_serial if ext1_y > ext2_y else ext2_serial
    right_serial = ext2_serial if left_serial == ext1_serial else ext1_serial

    # Create Dictionary of SVO/Depth Paths (relative to `data_dir`)
    rel_svo_path, rel_depth_path = svo_path.relative_to(data_dir), depth_path.relative_to(data_dir)
    record_paths = {
        "wrist_svo_path": str(rel_svo_path / f"{wrist_serial}.svo"),
        "wrist_depth_path": str(rel_depth_path / f"{wrist_serial}"),
        "ext1_svo_path": str(rel_svo_path / f"{ext1_serial}.svo"),
        "ext1_depth_path": str(rel_depth_path / f"{ext1_serial}"),
        "ext2_svo_path": str(rel_svo_path / f"{ext2_serial}.svo"),
        "ext2_depth_path": str(rel_depth_path / f"{ext2_serial}"),
        "left_depth_path": str(rel_depth_path / f"{left_serial}"),
        "right_depth_path": str(rel_depth_path / f"{right_serial}"),
    }

    return True, record_paths
Loading

0 comments on commit 13413ca

Please sign in to comment.