
Commit

Merge branch 's2025_teamA' of https://github.com/krishauser/GEMstack into s2025_teamA
rty727433198 committed Feb 27, 2025
2 parents 118c1c7 + 54b1bd7 commit 88cf406
Showing 35 changed files with 1,045 additions and 1,192 deletions.
Binary file modified GEMstack/.DS_Store
Binary file not shown.
2 changes: 1 addition & 1 deletion GEMstack/knowledge/calibration/gem_e4.yaml
@@ -1,6 +1,6 @@
calibration_date: "2025-02-25" # Date of calibration YYYY-MM-DD
reference: rear_axle_center # rear axle center
rear_axle_height: 0.33 # height of rear axle center above flat ground
rear_axle_height: 0.2794 # height of rear axle center above flat ground
gnss_location: [1.10,0,1.62] # meters, taken from https://github.com/hangcui1201/POLARIS_GEM_e2_Real/blob/main/vehicle_drivers/gem_gnss_control/scripts/gem_gnss_tracker_stanley_rtk.py. Note conflict with pure pursuit location?
gnss_yaw: 0.0 # radians
top_lidar: !include "gem_e4_ouster.yaml"
9 changes: 9 additions & 0 deletions GEMstack/knowledge/calibration/gem_e4_lidar_cam.yaml
@@ -0,0 +1,9 @@
# Calibration for top lidar->front rgb camera on GEM e4
# Calibration Date: 02/25/2025 05:09

lidar_to_camera: [
[ 2.89748006e-02, -9.99580136e-01, 3.68439439e-05, -3.07300513e-02],
[-9.49930618e-03, -3.12215512e-04, -9.99954834e-01, -3.86689354e-01],
[ 9.99534999e-01, 2.89731321e-02, -9.50437214e-03, -6.71425124e-01],
[ 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 1.00000000e+00]
]
5 changes: 3 additions & 2 deletions GEMstack/knowledge/calibration/gem_e4_oak.yaml
@@ -1,7 +1,8 @@
# Calibration for front rgb camera->vehicle on GEM e4
# Calibration Date: 02/25/2025 05:09

reference: rear_axle_center # rear axle center
rotation: [[ 0.00349517, -0.03239524, 0.99946903], # rotation camera -> vehicle
rotation: [[ 0.00349517, -0.03239524, 0.99946903], # rotation component of camera -> vehicle matrix
[-0.99996547, 0.00742285, 0.0037375],
[-0.00753999, -0.99944757, -0.03236817]] # rotation matrix mapping z to forward, x to left, y to down, guesstimated
center_position: [ 1.75864913, 0.01238124, 1.54408419] # translation camera -> vehicle meters, center camera, guesstimated
center_position: [ 1.75864913, 0.01238124, 1.54408419] # translation component of camera -> vehicle matrix (in meters)
7 changes: 4 additions & 3 deletions GEMstack/knowledge/calibration/gem_e4_ouster.yaml
@@ -1,7 +1,8 @@
# Calibration date: 02/25/2025 05:09
# Calibration for top lidar->vehicle on GEM e4
# Calibration Date: 02/25/2025 05:09

reference: rear_axle_center # rear axle center
position: [1.1, 0.03773044170906172, 1.9525244316515322] # lidar to vehicle frame translation
rotation: [[ 0.99941328, 0.02547416, 0.02289458], # lidar to vehicle rotation matrix
position: [1.1, 0.03773044170906172, 1.9525244316515322] # top lidar to vehicle matrix translation component
rotation: [[ 0.99941328, 0.02547416, 0.02289458], # top lidar to vehicle matrix rotation component
[-0.02530855, 0.99965159, -0.00749488],
[-0.02307753, 0.00691106, 0.99970979]]
2 changes: 1 addition & 1 deletion GEMstack/knowledge/defaults/computation_graph.yaml
@@ -31,7 +31,7 @@ components:
inputs: [vehicle, roadgraph, agents]
outputs: agent_intents
- relations_estimation:
inputs: [vehicle, roadgraph, agents, obstacles]
inputs: all
outputs: relations
- predicate_evaluation:
inputs: [vehicle, roadgraph, agents, obstacles]
12 changes: 12 additions & 0 deletions GEMstack/knowledge/defaults/current.yaml
@@ -20,6 +20,18 @@ control:
pid_p: 0.8
pid_i: 0.03
pid_d: 0.0
planning:
longitudinal_plan:
mode: 'real' # 'real' or 'sim'
yielder: 'expert' # 'expert', 'analytic', or 'simulation'
planner: 'dt' # 'milestone', 'dt', or 'dx'
desired_speed: 1.0
acceleration: 0.5
max_deceleration: 6.0
deceleration: 2.0
min_deceleration: 0.5
yield_deceleration: 0.5


#configure the simulator, if using
simulator:
10 changes: 10 additions & 0 deletions GEMstack/mathutils/quadratic_equation.py
@@ -1,6 +1,16 @@
import math

def quad_root(a : float, b : float, c : float) -> float:
    """Calculates the roots of a quadratic equation.

    Args:
        a (float): coefficient of x^2
        b (float): coefficient of x
        c (float): constant term
    Returns:
        The real roots of the quadratic equation (the discriminant is clamped
        at zero, so the result is always real).
    """
    x1 = (-b + max(0,(b**2 - 4*a*c))**0.5)/(2*a)
    x2 = (-b - max(0,(b**2 - 4*a*c))**0.5)/(2*a)
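    # Illustrative example (not from the original file): for x^2 - 3x + 2 = 0,
    # quad_root(1, -3, 2) computes the roots x1 = 2.0 and x2 = 1.0.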

92 changes: 82 additions & 10 deletions GEMstack/offboard/calibration/README.md
@@ -1,10 +1,40 @@
# Data Capture

### Data Capture Script (`capture_ouster_oak.py`)

Set up on vehicle:

1. Run `roscore` in a terminal
2. Run `catkin_make` in GEMstack
3. Run `source /demo_ws/devel/setup.bash`
4. Run `roslaunch basic_launch sensor_init.launch` to get all sensors running

Default script usage:

python3 capture_ouster_oak.py

Additional Options:
1. To specify the directory where data is saved, use `--folder "path to save location"` (default save folder is `data`)
2. To specify the data capture frequency, use `--frequency <frequency_in_hz>` (default is 2 Hz)
3. To specify the index at which saved files start being numbered, use `--start_index <desired_index>` (default is 1); a combined example is shown below
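For example, a capture run saving to a hypothetical `./calib_data` folder at 1 Hz, starting at index 10 (the folder name and values are illustrative):

```
python3 capture_ouster_oak.py --folder "./calib_data" --frequency 1 --start_index 10
```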


# GEMstack Offboard Calibration

This repository contains tools for offline calibration of LiDAR and camera sensors to the vehicle coordinate system on the GEM E4 platform. The calibration pipeline consists of three stages:
This section explains tools for offline calibration of LiDAR and camera sensors to the vehicle coordinate system on the GEM E4 platform. The calibration pipeline consists of three stages:

1. **LiDAR-to-Vehicle**
2. **Camera-to-Vehicle**
3. **LiDAR-to-Camera**
3. **LiDAR-to-Camera**

## Dependencies

```
pip install opencv-python scipy numpy matplotlib argparse pyyaml pyvis
# If you run into issues, try:
pip install trame ipywidgets trame-vuetify
```

---

@@ -26,40 +56,58 @@ This repository contains tools for offline calibration of LiDAR and camera senso

**Usage**:

python3 lidar_to_vehicle.py # Edit LiDAR data paths in script
Our script assumes data is formatted as `colorX.png`, `lidarX.npz`, and `depthX.tif`, where X is an index number. X is chosen using the `--index` flag shown below; set it based on which data sample you want to use for calibration.

python3 lidar_to_vehicle.py --data_path "path to data folder" --index INDEX_NUM

Optionally, use the `--vis` flag to show visualizations throughout the computation process.
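For reference, a data folder used with `--index 1` would contain files like the following (the folder name is illustrative):

```
data/
├── color1.png
├── depth1.tif
└── lidar1.npz
```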


### 2. CAMERA-to-Vehicle Calibration (`camera_to_vehicle_manual.py`)
**Method**:
1. Capture camera intrinsics using camera_info.py (ROS node)
2. Manually select 4 matching points in RGB image and LiDAR cloud
2. Manually select 8 matching points in the RGB image and LiDAR cloud (the `N` variable in the script can be adjusted to select more pairs)
3. Solve PnP problem to compute camera extrinsics

**Usage**:
1. Get camera intrinsics:
rosrun offboard/calibration/camera_info.py # Prints intrinsic matrix
2. Update `camera_in` in the script with the printed intrinsics
3. Update data paths in script
3. Our script assumes data is formatted as `colorX.png`, `lidarX.npz`, `depthX.tif`, where X is an index number. X is chosen using the `--index` flag shown below; set it based on which data sample you want to use for calibration.

The script also reads the lidar_to_vehicle matrix from the `gem_e4_ouster.yaml` file, so ensure that file is up to date.

4. Run calibration:
python3 camera_to_vehicle_manual.py

python3 camera_to_vehicle_manual.py --data_path "path to data folder" --index INDEX_NUM --config "path to gem_e4_ouster.yaml"

5. A pop-up of the RGB image for the data index you chose will appear. Left-click to select as many points as specified in the script (`N`).
6. The window will automatically close and reveal a pop-up of the lidar point cloud.
7. Choose the corresponding lidar points by hovering over each point and right-clicking, or by pressing P.
8. Once all corresponding points are selected, close the window and the output will be printed.
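The core of step 4 is a standard PnP solve. Below is a rough, self-contained sketch of that computation (not the script itself): the 3D/2D correspondences are synthesized from a made-up ground-truth pose to stand in for the manually clicked points, and the intrinsics are the ones printed by camera_info.py.

```python
import cv2
import numpy as np

# Intrinsics as printed by camera_info.py (copied here for illustration).
K = np.array([[684.83331299, 0., 573.37109375],
              [0., 684.60968018, 363.70092773],
              [0., 0., 1.]])

# Made-up ground-truth vehicle -> camera pose, used only to synthesize correspondences.
rvec_true = np.array([0.05, -0.02, 0.01])
tvec_true = np.array([0.10, -0.20, 1.50])

# Synthetic 3D points (e.g. lidar points already expressed in the vehicle frame).
pts3d = np.array([[2.0, 0.5, 0.0], [2.5, -0.4, 0.3], [3.0, 0.1, -0.2], [1.8, -0.6, 0.4],
                  [2.2, 0.7, 0.2], [2.8, -0.1, 0.5], [3.2, 0.4, 0.1], [2.6, -0.7, -0.1]])

# Project them to pixels to stand in for the manually clicked image points.
pts2d, _ = cv2.projectPoints(pts3d, rvec_true, tvec_true, K, None)
pts2d = pts2d.reshape(-1, 2)

# Solve PnP: recovers the vehicle -> camera pose from the N point pairs.
ok, rvec, tvec = cv2.solvePnP(pts3d, pts2d, K, None)
R_cam, _ = cv2.Rodrigues(rvec)

# Assemble the 4x4 vehicle -> camera transform, then invert to get camera -> vehicle.
T_vehicle_camera = np.eye(4)
T_vehicle_camera[:3, :3] = R_cam
T_vehicle_camera[:3, 3] = tvec.ravel()
T_camera_vehicle = np.linalg.inv(T_vehicle_camera)
print(T_camera_vehicle)
```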

### 3. LIDAR-to-CAMERA Calibration (`lidar_to_camera.py`)
**Method**:
1. Invert Camera-to-Vehicle matrix
2. Multiply with LiDAR-to-Vehicle matrix

**Usage**:

```
python3 lidar_to_camera.py # Ensure T_lidar_vehicle and T_camera_vehicle matrices are updated

```
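Under the hood this is one matrix inversion and one multiplication; the following is a minimal sketch of the composition, not the script itself (the concrete 4x4 matrices are the ones listed in the Results section below):

```python
import numpy as np

def compose_lidar_to_camera(T_camera_vehicle: np.ndarray, T_lidar_vehicle: np.ndarray) -> np.ndarray:
    """lidar -> camera = (vehicle -> camera) @ (lidar -> vehicle)."""
    return np.linalg.inv(T_camera_vehicle) @ T_lidar_vehicle
```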

### Visualization Tools

**3D Alignment Check**:
1. Use vis() function in scripts to view calibrated LiDAR/camera clouds
2. Toggle VIS = True in lidar_to_vehicle.py for ground plane/object visualization
3. Use test_transforms.py to visualize lidar point cloud on top of png image. Helps verify accuracy of lidar->camera.
2. Use the `--vis` flag when running lidar_to_vehicle.py for ground plane/object visualization
3. Use test_transforms.py to visualize the lidar point cloud, transformed into the camera frame, overlaid on the corresponding png image. This helps verify the accuracy of lidar->camera.

Usage of test_transforms.py:
```
python3 test_transforms.py --data_path "path to data folder" --index INDEX_NUM
```
`--data_path` is the directory where the lidar npz and color png files are located; `--index` selects whichever lidar/png pair you want to evaluate. Ex. lidar1.npz and color1.png correspond to INDEX_NUM = 1 (`--index 1`).

**Projection Validation**:
1. RGB image overlaid with transformed LiDAR points (Z-buffered)
@@ -80,7 +128,31 @@ python3 lidar_to_camera.py # Ensure T_lidar_vehicle and T_camera_vehicle matri
4. Validation: Use pyvista visualizations to verify alignment
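A minimal sketch of such a pyvista check, overlaying two point clouds in one window (the clouds below are random stand-ins, not data produced by the scripts):

```python
import numpy as np
import pyvista as pv

# Stand-in clouds: in practice these would be the lidar points transformed into the
# vehicle (or camera) frame, and the reference points they should line up with.
cloud_a = np.random.rand(500, 3)
cloud_b = np.random.rand(500, 3) + [0.01, 0.0, 0.0]

plotter = pv.Plotter()
plotter.add_points(cloud_a, color='red', point_size=4, render_points_as_spheres=True)
plotter.add_points(cloud_b, color='blue', point_size=4, render_points_as_spheres=True)
plotter.add_axes()
plotter.show()
```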


### Results

Our resulting transformation matrices are the following:
```
T_camera_vehicle = np.array([[ 0.00349517, -0.03239524, 0.99946903, 1.75864913],
[-0.99996547, 0.00742285, 0.0037375, 0.01238124],
[-0.00753999, -0.99944757, -0.03236817, 1.54408419],
[0.000000000, 0.00000000, 0.00000000, 1.0]])
T_lidar_vehicle = np.array([[ 0.99941328, 0.02547416, 0.02289458, 1.1],
[-0.02530855, 0.99965159, -0.00749488, 0.03773044170906172],
[-0.02307753, 0.00691106, 0.99970979, 1.9525244316515322],
[0.000000000, 0.00000000, 0.00000000, 1.0]])
T_lidar_camera = np.array([
[ 2.89748006e-02, -9.99580136e-01, 3.68439439e-05, -3.07300513e-02],
[-9.49930618e-03, -3.12215512e-04, -9.99954834e-01, -3.86689354e-01],
[ 9.99534999e-01, 2.89731321e-02, -9.50437214e-03, -6.71425124e-01],
[ 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 1.00000000e+00]
])
```
We find that these matrices are very accurate, and they worked well with the perception task of identifying pedestrians using the camera and lidar. The perception team makes use of our lidar->camera matrix. Below is an image showcasing the effectiveness of our lidar->camera matrix: you can see the lidar point cloud corresponds very well to the pixels in the image.

<img width="260" alt="Screenshot 2025-02-26 at 11 07 16 PM" src="https://github.com/user-attachments/assets/65322674-c715-47d4-bbef-880022ba1a5d" />

We calculate this lidar->camera matrix by multiplying the inverted camera->vehicle matrix with the lidar->vehicle matrix. To evaluate the accuracy of each of these matrices individually, we use the visualizations in their respective calibration scripts.

As a basic sanity check, we also ensure that the determinant of each resulting rotation matrix is 1.
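A minimal version of that check for the lidar->camera matrix above (a proper rotation matrix should have determinant +1 up to numerical error):

```python
import numpy as np

T_lidar_camera = np.array([
    [ 2.89748006e-02, -9.99580136e-01,  3.68439439e-05, -3.07300513e-02],
    [-9.49930618e-03, -3.12215512e-04, -9.99954834e-01, -3.86689354e-01],
    [ 9.99534999e-01,  2.89731321e-02, -9.50437214e-03, -6.71425124e-01],
    [ 0.00000000e+00,  0.00000000e+00,  0.00000000e+00,  1.00000000e+00]])

R = T_lidar_camera[:3, :3]        # rotation component
print(np.linalg.det(R))           # expect a value very close to 1.0
```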
38 changes: 27 additions & 11 deletions GEMstack/offboard/calibration/camera_to_vehicle_manual.py
@@ -4,26 +4,42 @@
import matplotlib.pyplot as plt
import numpy as np
from visualizer import visualizer
import argparse
import yaml

N = 8 #how many point pairs you want to select
parser = argparse.ArgumentParser(description='Select corresponding lidar, color, depth files based on index')
parser.add_argument('--data_path', type=str, required=True, help='Path to the dataset')
parser.add_argument('--index', type=int, required=True, help='Index for selecting the files')
parser.add_argument('--config', type=str, required=True, help='Path to YAML configuration file')
args = parser.parse_args()
args = parser.parse_args()

# Construct file paths based on the provided index
lidar_path = f'{args.data_path}/lidar{args.index}.npz'
rgb_path = f'{args.data_path}/color{args.index}.png'
depth_path = f'{args.data_path}/depth{args.index}.tif'

# Update Depending on Where Data Stored
rgb_path = './data/color32.png'
depth_path = './data/depth32.tif'
lidar_path = './data/lidar32.npz'
N = 8 #how many point pairs you want to select

img = cv2.imread(rgb_path, cv2.IMREAD_UNCHANGED)

lidar_points = np.load(lidar_path)['arr_0']
lidar_points = lidar_points[~np.all(lidar_points== 0, axis=1)] # remove (0,0,0)'s

rx,ry,rz = 0.006898647163954201, 0.023800082245145304, -0.025318355743942974
tx,ty,tz = 1.1, 0.037735827433173136, 1.953202227766785
rot = R.from_euler('xyz',[rx,ry,rz]).as_matrix()
lidar_ex = np.hstack([rot,[[tx],[ty],[tz]]])
lidar_ex = np.vstack([lidar_ex,[0,0,0,1]])

camera_in = np.array([

# Load transformation parameters from YAML file
with open(args.config, 'r') as yaml_file:
config = yaml.safe_load(yaml_file)

tx, ty, tz = config['position']
rot = np.array(config['rotation'])

# Construct transformation matrix
lidar_ex = np.hstack([rot, np.array([[tx], [ty], [tz]])])
lidar_ex = np.vstack([lidar_ex, [0, 0, 0, 1]])

camera_in = np.array([ # Update intrinsics if necessary
[684.83331299, 0. , 573.37109375],
[ 0. , 684.60968018, 363.70092773],
[ 0. , 0. , 1. ]
21 changes: 11 additions & 10 deletions GEMstack/offboard/calibration/capture_ouster_oak.py
@@ -4,6 +4,8 @@
import sensor_msgs.point_cloud2 as pc2
import ctypes
import struct
import argparse


# OpenCV and cv2 bridge
import cv2
@@ -70,20 +72,20 @@ def save_scan(lidar_fn,color_fn,depth_fn):
dimage = dimage.astype(np.uint16)
cv2.imwrite(depth_fn,dimage)

def main(folder='data',start_index=1):
def main(folder='data',start_index=1, frequency=2):
rospy.init_node("capture_ouster_oak",disable_signals=True)
lidar_sub = rospy.Subscriber("/ouster/points", PointCloud2, lidar_callback)
camera_sub = rospy.Subscriber("/oak/rgb/image_raw", Image, camera_callback)
depth_sub = rospy.Subscriber("/oak/stereo/image_raw", Image, depth_callback)
index = 0
index = start_index
print(" Storing lidar point clouds as npz")
print(" Storing color images as png")
print(" Storing depth images as tif")
print(" Ctrl+C to quit")
while True:
if camera_image and depth_image:
cv2.imshow("result",bridge.imgmsg_to_cv2(camera_image))
time.sleep(.5)
time.sleep(1.0/frequency)
files = [
os.path.join(folder,'lidar{}.npz'.format(index)),
os.path.join(folder,'color{}.png'.format(index)),
@@ -93,10 +95,9 @@ def main(folder='data',start_index=1):

if __name__ == '__main__':
import sys
folder = 'data'
start_index = 1
if len(sys.argv) >= 2:
folder = sys.argv[1]
if len(sys.argv) >= 3:
start_index = int(sys.argv[2])
main(folder,start_index)
parser = argparse.ArgumentParser(description='Capture LiDAR and camera data.')
parser.add_argument('--folder', type=str, default='data', help='Directory to store data')
parser.add_argument('--start_index', type=int, default=1, help='Starting index for saved files')
parser.add_argument('--frequency', type=float, default=2.0, help='Capture frequency in Hz')
args = parser.parse_args()
main(args.folder, args.start_index, args.frequency)
14 changes: 11 additions & 3 deletions GEMstack/offboard/calibration/lidar_to_vehicle.py
@@ -6,9 +6,15 @@
import pyvista as pv
import matplotlib.pyplot as plt
from visualizer import visualizer
import argparse

VIS = False # True to show visuals
VIS = True # True to show visuals
parser = argparse.ArgumentParser(description='Process LiDAR data with calibration.')
parser.add_argument('--data_path', type=str, required=True, help='Path to the dataset')
parser.add_argument('--index', type=int, required=True, help='Index for selecting the files')
parser.add_argument('--vis', action='store_true', help='Enable visualization')
args = parser.parse_args()

VIS = args.vis

#%% things to extract
tx,ty,tz,rx,ry,rz = [None] * 6
@@ -24,6 +30,9 @@ def load_scene(path):
sc = np.load(path)['arr_0']
sc = sc[~np.all(sc == 0, axis=1)] # remove (0,0,0)'s
return sc

sc1 = load_scene(f'{args.data_path}/lidar{args.index}.npz')

def crop(pc,ix=None,iy=None,iz=None):
# crop a subrectangle in a pointcloud
# usage: crop(pc, ix = (x_min,x_max), ...)
@@ -153,7 +162,6 @@ def pc_diff(pc0,pc1,tol=0.1):

else: #False to use only cropping
# Update depending on where data is stored
sc1 = load_scene('./data/lidar1.npz')

objects = sc1 @ rot.T + [0,0,tz]

