-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathapriltags.py
272 lines (226 loc) · 9.68 KB
/
apriltags.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
# Created by Alex Pereira
# Import Libraries
import math
import cv2 as cv
import numpy as np
import pupil_apriltags
from wpilib import Timer
from wpimath.geometry import *
# Import Classes
from communications import NetworkCommunications
# Import Utilities
from Utilities.Units import Units
from Utilities.Logger import Logger
# The side length of the AprilTag in meters (6 inches = 0.1524 m);
# passed to the detector for pose estimation and used to scale the overlay drawings
tagSize: float = Units.inchesToMeters(6)
# Creates the Detector Class
class Detector:
    """
    Detects AprilTags (tag16h5 family) in camera frames with pupil_apriltags,
    filters out noisy/off-field detections, converts tag poses into WPILib
    Pose3d objects, publishes results over NetworkTables, and can draw
    debug overlays (boxes/axes) onto the frame.
    """

    def __init__(self) -> None:
        """
        Constructor for the Detector class.
        """
        # Instance creation
        self.timer = Timer()
        self.comms = NetworkCommunications()
        # Creates a pupil apriltags detector for the tag16h5 family.
        # NOTE(review): refine_edges is documented as an int flag in
        # pupil_apriltags; 2.0 is passed here — confirm intent.
        self.detector = pupil_apriltags.Detector(families = "tag16h5", nthreads = 10, quad_decimate = 1.0, quad_sigma = 0.0, refine_edges = 2.0, decode_sharpening = 1.00)
        # Update logs
        Logger.logInfo("Detector initialized")

    def detectTags(self, stream, camera_matrix, vizualization: int = 0):
        """
        Detects AprilTags in a frame using pupil_apriltags.

        @param stream: An image generated by reading a VideoCapture (BGR or grayscale)
        @param camera_matrix: The camera's 3x3 intrinsic calibration matrix
        @param vizualization: 0 - no overlay, 1 - pose box, 2 - pose axes, 3 - box + axes
        @return (results, stream): list of [tag_id, Pose3d] pairs and the (possibly annotated) image
        """
        # If the stream is not grayscale, create a grayscale copy
        if (len(stream.shape) == 3):
            gray = cv.cvtColor(stream, cv.COLOR_BGR2GRAY)
        else:
            gray = stream
        # Define the intrinsic parameters of the camera
        intrinsic_properties = (camera_matrix[0, 0], camera_matrix[1, 1], camera_matrix[0, 2], camera_matrix[1, 2]) # fx, fy, cx, cy
        # Detect the AprilTags in the image with Pupil Apriltags,
        # asking the library to also estimate each tag's 3D pose
        detections = self.detector.detect(gray, estimate_tag_pose = True, camera_params = intrinsic_properties, tag_size = tagSize)
        # Noise-rejection thresholds for accepting a detection
        results = []
        maxError = 5e-6        # maximum allowed pose reprojection error
        maxHamming = 0         # no bit-correction allowed on the tag code
        minConfidence = 50     # minimum decision margin from the detector
        # Variables used to track the single best detection
        best = None
        minError = 1000
        # Gets current time (FPGA timestamp, used to timestamp the detection)
        time = self.timer.getFPGATimestamp()
        # Access the 3D pose of all detected tags
        for tag in detections:
            # Gets info from the tag
            decision_margin = tag.decision_margin
            hamming = tag.hamming
            tag_num = tag.tag_id
            center = tag.center
            error = tag.pose_err
            # Gets pose data from the tag
            rMatrix = tag.pose_R
            tVecs = tag.pose_t
            # Throws out tags not present on the field (IDs 1-8)
            if (1 <= tag_num <= 8):
                # Throws out noise (corrected bits, high pose error, or low confidence)
                if ((hamming <= maxHamming) and (error <= maxError) and (decision_margin >= minConfidence)):
                    # Creates a 3x4 pose matrix [R | t] from the rotation matrix and translation vector
                    pose = np.concatenate([rMatrix, tVecs], axis = 1)
                else:
                    # Detected tag is noise, move to next detection
                    continue
            else:
                # Detected tag is not on field, move to next detection
                continue
            # Sets detection time
            self.comms.setDetectionTimeSec(time)
            # Draws varying levels of information onto the image
            # (vizualization == 0 draws nothing)
            if (vizualization == 1):
                self.draw_pose_box(stream, camera_matrix, pose)
            elif (vizualization == 2):
                self.draw_pose_axes(stream, camera_matrix, pose, center)
            elif (vizualization == 3):
                self.draw_pose_box(stream, camera_matrix, pose)
                self.draw_pose_axes(stream, camera_matrix, pose, center)
            # Calculate Pose3d
            pose3d = self.getPose3D(pose)
            # Adds results to the arrays
            result = [tag_num, pose3d]
            results.append(result)
            # Keeps the detection with the lowest pose error seen so far
            # (NOTE: despite older comments, selection is by pose error, not decision margin)
            if (error < minError):
                minError = error
                best = result
        # Stores the best result in NetworkTables
        if (best is not None):
            self.comms.setBestResult(best)
        # Determines if there are valid targets
        if (len(results) > 0):
            self.comms.setTargetValid(True)
        else:
            self.comms.setTargetValid(False)
        return results, stream

    def getPose3D(self, poseMatrix = None):
        """
        Calculates a WPILib Pose3d from the PupilApriltags 3x4 pose matrix.

        @param poseMatrix: 3x4 [R | t] matrix from the detector (camera optical frame),
            or None for a blank pose
        @return Pose3d: Units in meters and radians
        """
        # Variables
        x, y, z = 0, 0, 0
        # Extract the tag data from the detection results
        if (poseMatrix is not None):
            # Flattens the pose array into a 1D array of 12 values
            flatPose = np.array(poseMatrix).flatten()
            # Creates the Rotation3d from the 3x3 rotation sub-matrix;
            # Rotation3d raises ValueError if the matrix is not a valid rotation
            try:
                tempRot = Rotation3d(
                    np.array([
                        [flatPose[0], flatPose[1], flatPose[2]],
                        [flatPose[4], flatPose[5], flatPose[6]],
                        [flatPose[8], flatPose[9], flatPose[10]]
                    ])
                )
            except ValueError as e:
                Logger.logError(e)
                tempRot = Rotation3d()
            tempTrans = Translation3d(flatPose[3], flatPose[7], flatPose[11])
            # Remap camera-optical axes to WPILib's convention
            # (camera Z -> forward X, camera X -> -Y, camera Y -> -Z)
            # NOTE(review): presumably converting to WPILib's NWU frame — confirm
            tempX = tempTrans.Z()
            y = -tempTrans.X()
            z = -tempTrans.Y()
            # Create a Rotation3d object with the same axis remapping, rounded to 2 decimals
            rot = Rotation3d(round(tempRot.Z(), 2), round(-tempRot.X(), 2), round(-tempRot.Y(), 2))
            # Calculates the field relative X and Y coordinate by rotating
            # the (forward, left) translation by the tag's yaw
            yTrans = Translation2d(tempX, y).rotateBy(Rotation2d(-rot.Z()))
            x = round(yTrans.X(), 2)
            y = round(yTrans.Y(), 2)
            # Calculates the field relative Z coordinate from the pitch
            zTrans = Translation2d(tempX, z).rotateBy(Rotation2d(np.pi + rot.Y()))
            z = round(zTrans.Y(), 2)
            # Create a Translation3d object
            trans = Translation3d(x, y, z)
            # Creates a Pose3d object in the field WCS
            pose = Pose3d(trans, rot)
            return pose
        else:
            # Returns a blank Pose3d
            return Pose3d()

    def draw_pose_box(self, img, camera_matrix, pose, z_sign = 1) -> None:
        """
        Draws the 3d pose box around the AprilTag.

        @param img: The image to write on (modified in place)
        @param camera_matrix: The camera's 3x3 intrinsic calibration matrix
        @param pose: The 3x4 [R | t] pose of the tag
        @param z_sign: The direction of the z-axis (+1 or -1)
        """
        # Creates the 8 corners of a cube, scaled to half the tag size
        opoints = np.array([
            -1, -1, 0,
            1, -1, 0,
            1, 1, 0,
            -1, 1, 0,
            -1, -1, -2 * z_sign,
            1, -1, -2 * z_sign,
            1, 1, -2 * z_sign,
            -1, 1, -2 * z_sign,
        ]).reshape(-1, 1, 3) * 0.5 * tagSize
        # Index pairs of cube corners to connect with lines
        edges = np.array([
            0, 1,
            1, 2,
            2, 3,
            3, 0,
            0, 4,
            1, 5,
            2, 6,
            3, 7,
            4, 5,
            5, 6,
            6, 7,
            7, 4
        ]).reshape(-1, 2)
        # Converts the rotation matrix to a Rodrigues rotation vector for projectPoints
        rVecs, _ = cv.Rodrigues(pose[:3,:3])
        tVecs = pose[:3, 3:]
        # Zero distortion coefficients (image assumed already undistorted)
        dcoeffs = np.zeros(5)
        # Projects the 3D cube corners into 2D image points
        ipoints, _ = cv.projectPoints(opoints, rVecs, tVecs, camera_matrix, dcoeffs)
        ipoints = np.round(ipoints).astype(int)
        ipoints = [tuple(pt) for pt in ipoints.reshape(-1, 2)]
        # Draws green lines between all the edges
        for i, j in edges:
            cv.line(img, ipoints[i], ipoints[j], (0, 255, 0), 1, 16)

    def draw_pose_axes(self, img, camera_matrix, pose, center) -> None:
        """
        Draws the colored pose axes around the AprilTag.

        @param img: The image to write on (modified in place)
        @param camera_matrix: The camera's 3x3 intrinsic calibration matrix
        @param pose: The 3x4 [R | t] pose of the tag
        @param center: The (x, y) pixel center of the AprilTag
        """
        # Converts the rotation matrix to a Rodrigues rotation vector for projectPoints
        rVecs, _ = cv.Rodrigues(pose[:3,:3])
        tVecs = pose[:3, 3:]
        # Zero distortion coefficients (image assumed already undistorted)
        dcoeffs = np.zeros(5)
        # Unit axis endpoints in the tag frame, scaled to the tag size
        opoints = np.float32([[1, 0, 0],
                              [0, -1, 0],
                              [0, 0, -1]
                              ]).reshape(-1, 3) * tagSize
        # Projects the axis endpoints into 2D image points
        ipoints, _ = cv.projectPoints(opoints, rVecs, tVecs, camera_matrix, dcoeffs)
        ipoints = np.round(ipoints).astype(int)
        # Rounds the tag center to integer pixel coordinates
        center = np.round(center).astype(int)
        center = tuple(center.ravel())
        # Draws the 3d pose lines: X red, Y green, Z blue (BGR colors)
        cv.line(img, center, tuple(ipoints[0].ravel()), (0, 0, 255), 2)
        cv.line(img, center, tuple(ipoints[1].ravel()), (0, 255, 0), 2)
        cv.line(img, center, tuple(ipoints[2].ravel()), (255, 0, 0), 2)