diff --git a/.nojekyll b/.nojekyll new file mode 100644 index 0000000..e69de29 diff --git a/404.html b/404.html new file mode 100644 index 0000000..7451872 --- /dev/null +++ b/404.html @@ -0,0 +1,718 @@ + + + +
+ + + + + + + + + + + + + + +common
view_points
+
+
+view_points(points: NDArrayF64, intrinsic: NDArrayF64, distortion: NDArrayF64 | None = None, *, normalize: bool = True) -> NDArrayF64
+
Project 3d points on a 2d plane. It can be used to implement both perspective and orthographic projections.
+It first applies the dot product between the points and the view.
+ + +Parameters:
+points
+ (NDArrayF64
)
+ –
+ Matrix of points, which is the shape of (3, n) and (x, y, z) is along each column.
+intrinsic
+ (NDArrayF64
)
+ –
+ nxn camera intrinsic matrix (n <= 4).
+distortion
+ (NDArrayF64 | None
, default:
+ None
+)
+ –
+ Camera distortion coefficients, which is the shape of (n,) (n >= 5).
+normalize
+ (bool
, default:
+ True
+)
+ –
+ Whether to normalize the remaining coordinate (along the 3rd axis).
+Returns:
+NDArrayF64
+ –
+ Projected points in the shape of (3, n). If normalize=False
, the 3rd coordinate is the height.
is_box_in_image
+
+
+is_box_in_image(box: Box3D, intrinsic: NDArrayF64, img_size: tuple[int, int], visibility: VisibilityLevel = VisibilityLevel.NONE) -> bool
+
Check if a box is visible inside of an image without considering its occlusions.
+ + +Parameters:
+box
+ (Box3D
)
+ –
+ The box to be checked.
+intrinsic
+ (NDArrayF64
)
+ –
+ 3x3 camera intrinsic matrix.
+img_size
+ (tuple[int, int]
)
+ –
+ Image size in the order of (width, height).
+visibility
+ (VisibilityLevel
, default:
+ NONE
+)
+ –
+ Enum member of VisibilityLevel.
+Returns:
+bool
+ –
+ Return True if visibility condition is satisfied.
+
load_json
+
+
+Load json data from specified filepath.
+ + +Parameters:
+filename
+ (str
)
+ –
+ File path to .json file.
+Returns:
+Any
+ –
+ Loaded data.
+dataclass
SemanticLabel
+
+
+A dataclass to represent semantic labels.
+ + +Attributes:
+name
+ (str
)
+ –
+ Label name.
+attributes
+ (list[str]
)
+ –
+ List of attribute names.
+
Box3D
+
+
+A class to represent 3D box.
+ + +Attributes:
+unix_time
+ (int
)
+ –
+ Unix timestamp.
+frame_id
+ (str
)
+ –
+ Coordinates frame ID where the box is with respect to.
+semantic_label
+ (SemanticLabel
)
+ –
+ SemanticLabel
object.
confidence
+ (float
)
+ –
+ Confidence score of the box.
+uuid
+ (str | None
)
+ –
+ Unique box identifier.
+position
+ (TranslationType
)
+ –
+ Box center position (x, y, z).
+rotation
+ (RotationType
)
+ –
+ Box rotation quaternion.
+shape
+ (Shape
)
+ –
+ Shape
object.
velocity
+ (VelocityType | None
)
+ –
+ Box velocity (vx, vy, vz).
+num_points
+ (int | None
)
+ –
+ The number of points inside the box.
+future
+ (list[Trajectory] | None
)
+ –
+ Box trajectory in the future of each mode.
+Examples:
+>>> # without future
+>>> box3d = Box3D(
+... unix_time=100,
+... frame_id="base_link",
+... semantic_label=SemanticLabel("car"),
+... position=(1.0, 1.0, 1.0),
+... rotation=Quaternion([0.0, 0.0, 0.0, 1.0]),
+... shape=Shape(shape_type=ShapeType.BOUNDING_BOX, size=(1.0, 1.0, 1.0)),
+... velocity=(1.0, 1.0, 1.0),
+... confidence=1.0,
+... uuid="car3d_0",
+... )
+>>> # with future
+>>> box3d = box3d.with_future(
+... waypoints=[[[1.0, 1.0, 1.0], [2.0, 2.0, 2.0], [3.0, 3.0, 3.0]]],
+... confidences=[1.0],
+... )
+
with_future
+
+
+Return a self instance setting future
attribute.
Parameters:
+waypoints
+ (list[TrajectoryType]
)
+ –
+ List of waypoints for each mode.
+confidences
+ (list[float]
)
+ –
+ List of confidences for each mode.
+Returns:
+Self
+ –
+ Self instance after setting future
.
corners
+
+
+Return the bounding box corners.
+ + +Parameters:
+box_scale
+ (float
, default:
+ 1.0
+)
+ –
+ Multiply size by this factor to scale the box.
+Returns:
+NDArrayF64
+ –
+ First four corners are the ones facing forward. The last four are the ones facing backwards, +in the shape of (8, 3).
+
Box2D
+
+
+A class to represent 2D box.
+ + +Attributes:
+unix_time
+ (int
)
+ –
+ Unix timestamp.
+frame_id
+ (str
)
+ –
+ Coordinates frame ID where the box is with respect to.
+semantic_label
+ (SemanticLabel
)
+ –
+ SemanticLabel
object.
confidence
+ (float
)
+ –
+ Confidence score of the box.
+uuid
+ (str | None
)
+ –
+ Unique box identifier.
+roi
+ (Roi | None
)
+ –
+ Roi
object.
position
+ (TranslationType | None
)
+ –
+ 3D position (x, y, z).
+Examples:
+>>> # without 3D position
+>>> box2d = Box2D(
+... unix_time=100,
+... frame_id="camera",
+... semantic_label=SemanticLabel("car"),
+... roi=(100, 100, 50, 50),
+... confidence=1.0,
+... uuid="car2d_0",
+... )
+>>> # with 3D position
+>>> box2d = box2d.with_position(position=(1.0, 1.0, 1.0))
+
with_position
+
+
+Return a self instance setting position
attribute.
Parameters:
+position
+ (TranslationType
)
+ –
+ 3D position.
+Returns:
+Self
+ –
+ Self instance after setting position
.
distance_box
+
+
+Return a box distance from base_link
.
Parameters:
+box
+ (BoxType
)
+ –
+ A box.
+tf_matrix
+ (HomogeneousMatrix
)
+ –
+ Transformation matrix.
+Raises:
+TypeError
+ –
+ Expecting type of box is Box2D
or Box3D
.
Returns:
+float | None
+ –
+ float | None: Return None
if the type of box is Box2D
and its position
is None
,
+otherwise returns distance from base_link
.
PointCloud
+
+
+Abstract base dataclass for pointcloud data.
+ + + + +
num_dims
+
+
+
+ abstractmethod
+ staticmethod
+
+
+Return the number of the point dimensions.
+ + +Returns:
+int
( int
+) –
+ The number of the point dimensions.
+
from_file
+
+
+
+ abstractmethod
+ classmethod
+
+
+Create an object from pointcloud file.
+ + +Parameters:
+filepath
+ (str
)
+ –
+ File path of the pointcloud file.
+Returns:
+Self
+ –
+ Self instance.
+
LidarPointCloud
+
+
+A dataclass to represent lidar pointcloud.
+ + +Attributes:
+points
+ (NDArrayFloat
)
+ –
+ Points matrix in the shape of (4, N).
+
RadarPointCloud
+
+
+A dataclass to represent radar pointcloud.
+ + +Attributes:
+points
+ (NDArrayFloat
)
+ –
+ Points matrix in the shape of (18, N).
+
SegmentationPointCloud
+
+
+A dataclass to represent segmentation pointcloud.
+ + +Attributes:
+points
+ (NDArrayFloat
)
+ –
+ Points matrix in the shape of (4, N).
+labels
+ (NDArrayU8
)
+ –
+ Label matrix.
+
Roi
+
+
+A dataclass to represent 2D box ROI.
+ + +Attributes:
+roi
+ (RoiType
)
+ –
+ Box ROI in the order of (x, y, width, height).
+
offset
+
+
+
+ property
+
+
+Return the xy offset from the image origin at the top left of the box.
+ + +Returns:
+tuple[int, int]
+ –
+ Top left corner (x, y).
+
size
+
+
+
+ property
+
+
+Return the size of the box.
+ + +Returns:
+tuple[int, int]
+ –
+ Box size (width, height).
+
width
+
+
+
+ property
+
+
+Return the width of the box.
+ + +Returns:
+int
+ –
+ Box width.
+
height
+
+
+
+ property
+
+
+Return the height of the box.
+ + +Returns:
+int
+ –
+ Box height.
+
center
+
+
+
+ property
+
+
+Return the center position of the box from the image origin.
+ + +Returns:
+tuple[int, int]
+ –
+ Center position of the box (cx, cy).
+
ShapeType
+
+
+
+ Bases: Enum
Trajectory
+
+
+A dataclass to represent trajectory.
+ + +Attributes:
+waypoints
+ (TrajectoryType
)
+ –
+ Waypoints matrix in the shape of (N, 3).
+confidence
+ (float
)
+ –
+ Confidence score of the trajectory.
+Examples:
+>>> trajectory = Trajectory(
+... waypoints=[[1.0, 1.0, 1.0], [2.0, 2.0, 2.0]],
+... confidence=1.0,
+... )
+# Get the number of waypoints.
+>>> len(trajectory)
+2
+# Access the shape of waypoints matrix: (N, 3).
+>>> trajectory.shape
+(2, 3)
+# Access each point as subscriptable.
+>>> trajectory[0]
+array([1., 1., 1.])
+# Access each point as iterable.
+>>> for point in trajectory:
+... print(point)
+...
+[1. 1. 1.]
+[2. 2. 2.]
+
to_trajectories
+
+
+Convert a list of waypoints and confidences to a list of Trajectory
s for each mode.
Parameters:
+waypoints
+ (list[TrajectoryType]
)
+ –
+ List of waypoints for each mode.
+confidences
+ (list[float]
)
+ –
+ List of confidences for each mode.
+Returns:
+list[Trajectory]
+ –
+ List of Trajectory
s for each mode.
TransformBuffer
+
+
+A buffer class to store transformation matrices.
+ + +Parameters:
+buffer
+ (dict[tuple[str, str], HomogeneousMatrix]
)
+ –
+ Matrix buffer whose key is (src, dst)
.
set_transform
+
+
+Set transform matrix to the buffer. +Also, if its inverse transformation has not been registered, registers it too.
+ + +Parameters:
+matrix
+ (HomogeneousMatrix
)
+ –
+ Transformation matrix.
+
lookup_transform
+
+
+Look up the transform matrix corresponding to the src
and dst
frame ID.
Parameters:
+src
+ (str
)
+ –
+ Source frame ID.
+dst
+ (str
)
+ –
+ Destination frame ID.
+Returns:
+HomogeneousMatrix | None
+ –
+ Returns HomogeneousMatrix
if the corresponding matrix can be found,
+otherwise it returns None
.
do_translate
+
+
+Translate specified items with the matrix corresponding to src
and dst
frame ID.
Parameters:
+src
+ (str
)
+ –
+ Source frame ID.
+dst
+ (str
)
+ –
+ Destination frame ID.
+Returns:
+TranslateItemLike | None
+ –
+ TranslateItemLike | None: Returns translated items if the corresponding matrix can be found,
+otherwise it returns None
.
do_rotate
+
+
+Rotate specified items with the matrix corresponding to src
and dst
frame ID.
Parameters:
+src
+ (str
)
+ –
+ Source frame ID.
+dst
+ (str
)
+ –
+ Destination frame ID.
+Returns:
+RotateItemLike | None
+ –
+ TranslateItemLike | None: Returns rotated items if the corresponding matrix can be found,
+otherwise it returns None
.
do_transform
+
+
+Transform specified items with the matrix corresponding to src
and dst
frame ID.
Parameters:
+src
+ (str
)
+ –
+ Source frame ID.
+dst
+ (str
)
+ –
+ Destination frame ID.
+Returns:
+TransformItemLike | None
+ –
+ TranslateItemLike | None: Returns transformed items if the corresponding matrix can be found,
+otherwise it returns None
.
HomogeneousMatrix
+
+
+
shape
+
+
+
+ property
+
+
+Return a shape of the homogeneous matrix.
+ + +Returns:
+tuple[int, ...]
+ –
+ Return the shape of (4, 4).
+
yaw_pitch_roll
+
+
+
+ property
+
+
+Return yaw, pitch and roll.
+ + +yaw: Rotation angle around the z-axis in [rad], in the range [-pi, pi]
.
+pitch: Rotation angle around the y'-axis in [rad], in the range [-pi/2, pi/2]
.
+roll: Rotation angle around the x"-axis in [rad], in the range [-pi, pi]
.
Returns:
+tuple[float, float, float]
+ –
+ Yaw, pitch and roll in [rad].
+
rotation_matrix
+
+
+
+ property
+
+
+Return a 3x3 rotation matrix.
+ + +Returns:
+NDArray
+ –
+ 3x3 rotation matrix.
+
as_identity
+
+
+
+ classmethod
+
+
+Construct a new object with identity.
+ + +Parameters:
+frame_id
+ (str
)
+ –
+ Frame ID.
+Returns:
+Self
+ –
+ Constructed self instance.
+
from_matrix
+
+
+
+ classmethod
+
+
+from_matrix(matrix: NDArray | HomogeneousMatrix, src: str | None = None, dst: str | None = None) -> Self
+
Construct a new object from a homogeneous matrix.
+ + +Parameters:
+matrix
+ (NDArray | HomogeneousMatrix
)
+ –
+ 4x4 homogeneous matrix.
+src
+ (str | None
, default:
+ None
+)
+ –
+ Source frame ID.
+This must be specified only if the input matrix is NDArray
.
dst
+ (str | None
, default:
+ None
+)
+ –
+ Destination frame ID.
+This must be specified only if the input matrix is NDArray
.
Returns:
+Self
+ –
+ Constructed self instance.
+
dot
+
+
+Return a dot product of myself and another.
+ + +Parameters:
+other
+ (HomogeneousMatrix
)
+ –
+ HomogeneousMatrix
object.
Raises:
+ValueError
+ –
+ self.src
and other.dst
must be the same frame ID.
Returns:
+HomogeneousMatrix
+ –
+ Result of a dot product.
+
inv
+
+
+Return an inverse matrix of myself.
+ + +Returns:
+HomogeneousMatrix
+ –
+ Inverse matrix.
+filtering
BoxFilter
+
+
+A class composes multiple filtering functions.
+ +Construct a new object.
+ + +Parameters:
+params
+ (FilterParams
)
+ –
+ Filtering parameters.
+tf_buffer
+ (TransformBuffer
)
+ –
+ Transformation buffer.
+
FilterByLabel
+
+
+Filter a box by checking if the label of the box is included in specified labels.
+Note that, if labels
+ is None, all boxes pass through this filter.
Construct a new object.
+ + +Parameters:
+labels
+ (Sequence[str | SemanticLabel] | None
, default:
+ None
+)
+ –
+ Sequence of target labels.
+If None
, this filter always returns True
.
FilterByUUID
+
+
+Filter a box by checking if the uuid of the box is included in specified uuids.
+Note that, if uuids
+ is None, all boxes pass through this filter.
Construct a new object.
+ + +Parameters:
+uuids
+ (Sequence[str] | None
, default:
+ None
+)
+ –
+ Sequence of target uuids.
+If None
, this filter always returns True
.
FilterByDistance
+
+
+Filter a box by checking if the box is within the specified distance.
+Note that, if the type of the box is Box2D
and its position
is None,
+these boxes pass through this filter.
Construct a new object.
+ + +Parameters:
+min_distance
+ (float
)
+ –
+ Minimum distance from the ego [m].
+max_distance
+ (float
)
+ –
+ Maximum distance from the ego [m].
+
FilterByPosition
+
+
+Filter a box by checking if the box xy position is within the specified xy position.
+Note that, if the type of the box is Box2D
and its position
is None,
+these boxes pass through this filter.
Construct a new object.
+ + +Parameters:
+min_xy
+ (tuple[float, float]
)
+ –
+ Minimum xy position [m].
+max_xy
+ (tuple[float, float]
)
+ –
+ Maximum xy position [m].
+
FilterBySpeed
+
+
+Filter a 3D box by checking if the box speed is within the specified one.
+Note that, if the type of the box is Box2D
, or Box3D
and its velocity
is None,
+these boxes pass through this filter.
Construct a new object.
+ + +Parameters:
+min_speed
+ (float
)
+ –
+ Minimum speed [m/s].
+max_speed
+ (float
)
+ –
+ Maximum speed [m/s].
+
FilterByNumPoints
+
+
+Filter a 3D box by checking if the box includes points greater than the specified one.
+Note that, if the type of the box is Box2D
, or Box3D
and its num_points
is None,
+these boxes pass through this filter.
Construct a new object.
+ + +Parameters:
+min_num_points
+ (int
, default:
+ 0
+)
+ –
+ The minimum number of points that a box should include.
+
FilterParams
+
+
+A dataclass to represent filtering parameters.
+ + +Attributes:
+labels
+ (Sequence[str | SemanticLabel] | None
)
+ –
+ Sequence of target labels.
+uuids
+ (Sequence[str] | None
)
+ –
+ Sequence of target uuids.
+min_distance
+ (float
)
+ –
+ Minimum distance from the ego [m].
+max_distance
+ (float
)
+ –
+ Maximum distance from the ego [m].
+min_xy
+ (tuple[float, float]
)
+ –
+ Minimum xy position from the ego [m].
+min_xy
+ (tuple[float, float]
)
+ –
+ Maximum xy position from the ego [m].
+min_speed
+ (float
)
+ –
+ Minimum speed [m/s].
+max_speed
+ (float
)
+ –
+ Maximum speed [m/s].
+min_num_points
+ (int
)
+ –
+ The minimum number of points which the 3D box should include.
+schema
SchemaName
+
+
+An enum to represent schema filenames.
+ + +Attributes:
+ATTRIBUTE
+ –
+ Property of an instance that can change while the category remains the same.
+CALIBRATED_SENSOR
+ –
+ Definition of a particular sensor as calibrated on a vehicle.
+CATEGORY
+ –
+ Object categories.
+EGO_POSE
+ –
+ Ego vehicle pose at a particular timestamp.
+INSTANCE
+ –
+ An object instance.
+LOG
+ –
+ Information about the log from which the data was extracted.
+MAP
+ –
+ Map data that is stored as binary semantic masks from a top-down view.
+SAMPLE
+ –
+ A sample is an annotated keyframe at a specific Hz.
+SAMPLE_ANNOTATION
+ –
+ A bounding box defining the position of an object seen in a sample.
+SAMPLE_DATA
+ –
+ A sensor data e.g. image, pointcloud or radar return.
+SCENE
+ –
+ A scene is a specific long sequence of consecutive frames extracted from a log.
+SENSOR
+ –
+ A specific sensor type.
+VISIBILITY
+ –
+ The visibility of an instance is the fraction of the annotation visible in all images.
+OBJECT_ANN
+ (optional
)
+ –
+ The annotation of a foreground object in an image.
+SURFACE_ANN
+ (optional
)
+ –
+ The annotation of a background object in an image.
+KEYPOINT
+ (optional
)
+ –
+ The annotation of pose keypoints of an object in an image.
+VEHICLE_STATE
+ (optional
)
+ –
+ The annotation of ego vehicle states.
+
filename
+
+
+
+ property
+
+
+Return the annotation json filename.
+ + +Returns:
+str
+ –
+ Annotation json filename.
+
Attribute
+
+
+
+ Bases: SchemaBase
A dataclass to represent schema table of attribute.json
.
Attributes:
+token
+ (str
)
+ –
+ Unique record identifier.
+name
+ (str
)
+ –
+ Attribute name.
+description
+ (str
)
+ –
+ Attribute description.
+
CalibratedSensor
+
+
+
+ Bases: SchemaBase
A dataclass to represent schema table of calibrated_sensor.json
.
Attributes:
+token
+ (str
)
+ –
+ Unique record identifier.
+sensor_token
+ (str
)
+ –
+ Foreign key pointing to the sensor type.
+translation
+ (TranslationType
)
+ –
+ Coordinates system origin given as [x, y, z] in [m].
+rotation
+ (RotationType
)
+ –
+ Coordinates system orientation given as quaternion [w, x, y, z].
+camera_intrinsic
+ (CamIntrinsicType
)
+ –
+ 3x3 camera intrinsic matrix. Empty for sensors that are not cameras.
+camera_distortion
+ (CamDistortionType
)
+ –
+ Camera distortion array. Empty for sensors that are not cameras.
+
Category
+
+
+
+ Bases: SchemaBase
A dataclass to represent schema table of category.json
.
Attributes:
+token
+ (str
)
+ –
+ Unique record identifier.
+name
+ (str
)
+ –
+ Category name.
+description
+ (str
)
+ –
+ Category description.
+
EgoPose
+
+
+
+ Bases: SchemaBase
A dataclass to represent schema table of ego_pose.json
.
Attributes:
+token
+ (str
)
+ –
+ Unique record identifier.
+translation
+ (TranslationType
)
+ –
+ Coordinate system origin given as [x, y, z] in [m].
+rotation
+ (RotationType
)
+ –
+ Coordinate system orientation given as quaternion [w, x, y, z].
+timestamp
+ (int
)
+ –
+ Unix time stamp.
+twist
+ (TwistType | None
)
+ –
+ Linear and angular velocities in the local coordinate system of +the ego vehicle (in m/s for linear and rad/s for angular), in the order of +(vx, vy, vz, yaw_rate, pitch_rate, roll_rate).
+acceleration
+ (AccelerationType | None
)
+ –
+ Acceleration in the local coordinate system of +the ego vehicle (in m/s2), in the order of (ax, ay, az).
+geocoordinate
+ (GeoCoordinateType | None
)
+ –
+ Coordinates in the WGS 84 reference ellipsoid +(latitude, longitude, altitude) in degrees and meters.
+
Instance
+
+
+
+ Bases: SchemaBase
A dataclass to represent schema table of instance.json
.
Attributes:
+token
+ (str
)
+ –
+ Unique record identifier.
+category_token
+ (str
)
+ –
+ Foreign key pointing to the object category.
+instance_name
+ (str
)
+ –
+ Dataset name and instance ID defined in annotation tool.
+nbr_annotations
+ (int
)
+ –
+ Number of annotations of this instance.
+first_annotation_token
+ (str
)
+ –
+ Foreign key pointing to the first annotation of this instance.
+last_annotation_token
+ (str
)
+ –
+ Foreign key pointing to the last annotation of this instance.
+
Keypoint
+
+
+
+ Bases: SchemaBase
A dataclass to represent schema table of keypoint.json
.
Attributes:
+token
+ (str
)
+ –
+ Unique record identifier.
+sample_data_token
+ (str
)
+ –
+ Foreign key pointing to the sample data, which must be a keyframe image.
+instance_token
+ (str
)
+ –
+ Foreign key pointing to the instance.
+category_tokens
+ (list[str]
)
+ –
+ Foreign key pointing to keypoints categories.
+keypoints
+ (KeypointType
)
+ –
+ Annotated keypoints. Given as a list of [x, y].
+num_keypoints
+ (int
)
+ –
+ The number of keypoints to be annotated.
+
Log
+
+
+
+ Bases: SchemaBase
A dataclass to represent schema table of log.json
.
Attributes:
+token
+ (str
)
+ –
+ Unique record identifier.
+logfile
+ (str
)
+ –
+ Log file name.
+vehicle
+ (str
)
+ –
+ Vehicle name.
+data_captured
+ (str
)
+ –
+ Date the data was captured (YYYY-MM-DD-HH-mm-ss).
+location
+ (str
)
+ –
+ Area where log was captured.
+map_token (str): Foreign key pointing to the map record. + This should be set after instantiated.
+
Map
+
+
+
+ Bases: SchemaBase
A dataclass to represent schema table of map.json
.
Attributes:
+token
+ (str
)
+ –
+ Unique record identifier.
+log_tokens
+ (str
)
+ –
+ Foreign keys pointing the log tokens.
+category
+ (str
)
+ –
+ Map category.
+filename
+ (str
)
+ –
+ Relative path to the file with the map mask.
+
ObjectAnn
+
+
+
+ Bases: SchemaBase
A dataclass to represent schema table of object_ann.json
.
Attributes:
+token
+ (str
)
+ –
+ Unique record identifier.
+sample_data_token
+ (str
)
+ –
+ Foreign key pointing to the sample data, which must be a keyframe image.
+instance_token
+ (str
)
+ –
+ Foreign key pointing to the instance.
+category_token
+ (str
)
+ –
+ Foreign key pointing to the object category.
+attribute_tokens
+ (list[str]
)
+ –
+ Foreign keys. List of attributes for this annotation.
+bbox
+ (RoiType
)
+ –
+ Annotated bounding box. Given as [xmin, ymin, xmax, ymax].
+mask
+ (RLEMask
)
+ –
+ Instance mask using the COCO format compressed by RLE.
+category_name (str): Category name. This should be set after instantiated.
+
width
+
+
+
+ property
+
+
+Return the width of the bounding box.
+ + +Returns:
+int
+ –
+ Bounding box width in pixel.
+
Sample
+
+
+
+ Bases: SchemaBase
A dataclass to represent schema table of sample.json
.
Attributes:
+token
+ (str
)
+ –
+ Unique record identifier.
+timestamp
+ (int
)
+ –
+ Unix time stamp.
+scene_token
+ (str
)
+ –
+ Foreign key pointing to the scene.
+next
+ (str
)
+ –
+ Foreign key pointing the sample that follows this in time. Empty if end of scene.
+prev
+ (str
)
+ –
+ Foreign key pointing the sample that precedes this in time. Empty if start of scene.
+data (dict[str, str]): Sensor channel and its token. + This should be set after instantiated. +ann_3ds (list[str]): List of foreign keys pointing the sample annotations. + This should be set after instantiated. +ann_2ds (list[str]): List of foreign keys pointing the object annotations. + This should be set after instantiated. +surface_anns (list[str]): List of foreign keys pointing the surface annotations. + This should be set after instantiated.
+
SampleAnnotation
+
+
+
+ Bases: SchemaBase
A dataclass to represent schema table of sample_annotation.json
.
Attributes:
+token
+ (str
)
+ –
+ Unique record identifier.
+sample_token
+ (str
)
+ –
+ Foreign key pointing the sample.
+instance_token
+ (str
)
+ –
+ Foreign key pointing the object instance.
+attribute_tokens
+ (list[str]
)
+ –
+ Foreign keys. List of attributes for this annotation.
+visibility_token
+ (str
)
+ –
+ Foreign key pointing the object visibility.
+translation
+ (TranslationType
)
+ –
+ Bounding box location given as [x, y, z] in [m].
+size
+ (SizeType
)
+ –
+ Bounding box size given as [width, length, height] in [m].
+rotation
+ (RotationType
)
+ –
+ Bounding box orientation given as quaternion [w, x, y, z].
+num_lidar_pts
+ (int
)
+ –
+ Number of lidar points in this box.
+num_radar_pts
+ (int
)
+ –
+ Number of radar points in this box.
+next
+ (str
)
+ –
+ Foreign key pointing the annotation that follows this in time. +Empty if this is the last annotation for this object.
+prev
+ (str
)
+ –
+ Foreign key pointing the annotation that precedes this in time. +Empty if this the first annotation for this object.
+velocity
+ (VelocityType | None
)
+ –
+ Bounding box velocity given as +[vx, vy, vz] in [m/s].
+acceleration
+ (AccelerationType | None
)
+ –
+ Bounding box acceleration +given as [ax, ay, az] in [m/s^2].
+category_name (str): Category name. This should be set after instantiated.
+
SampleData
+
+
+
+ Bases: SchemaBase
A class to represent schema table of sample_data.json
.
Attributes:
+token
+ (str
)
+ –
+ Unique record identifier.
+sample_token
+ (str
)
+ –
+ Foreign key pointing the sample.
+ego_pose_token
+ (str
)
+ –
+ Foreign key pointing the ego_pose.
+calibrated_sensor_token
+ (str
)
+ –
+ Foreign key pointing the calibrated_sensor.
+filename
+ (str
)
+ –
+ Relative path to data-blob on disk.
+fileformat
+ (FileFormat
)
+ –
+ Data file format.
+width
+ (int
)
+ –
+ If the sample data is an image, this is the image width in [px].
+height
+ (int
)
+ –
+ If the sample data is an image, this is the image height in [px].
+timestamp
+ (int
)
+ –
+ Unix time stamp.
+is_key_frame
+ (bool
)
+ –
+ True if sample_data is part of a key frame, else False.
+next
+ (str
)
+ –
+ Foreign key pointing the sample_data that follows this in time. +Empty if end of scene.
+prev
+ (str
)
+ –
+ Foreign key pointing the sample_data that precedes this in time. +Empty if start of scene.
+is_valid
+ (bool
)
+ –
+ True if this data is valid, else False. Invalid data should be ignored.
+modality (SensorModality): Sensor modality. This should be set after instantiated. +channel (str): Sensor channel. This should be set after instantiated.
+
Scene
+
+
+
+ Bases: SchemaBase
A dataclass to represent schema table of scene.json
.
Attributes:
+token
+ (str
)
+ –
+ Unique record identifier.
+name
+ (str
)
+ –
+ Short string identifier.
+description
+ (str
)
+ –
+ Longer description for the scene.
+log_token
+ (str
)
+ –
+ Foreign key pointing to log from where the data was extracted.
+nbr_samples
+ (int
)
+ –
+ Number of samples in the scene.
+first_sample_token
+ (str
)
+ –
+ Foreign key pointing to the first sample in scene.
+last_sample_token
+ (str
)
+ –
+ Foreign key pointing to the last sample in scene.
+
Sensor
+
+
+
+ Bases: SchemaBase
A dataclass to represent schema table of sensor.json
.
Attributes:
+token
+ (str
)
+ –
+ Unique record identifier.
+channel
+ (str
)
+ –
+ Sensor channel name.
+modality
+ (SensorModality
)
+ –
+ Sensor modality.
+first_sd_token (str): The first sample data token corresponding to its sensor channel.
+
SurfaceAnn
+
+
+
+ Bases: SchemaBase
A dataclass to represent schema table of surface_ann.json
.
Attributes:
+token
+ (str
)
+ –
+ Unique record identifier.
+sample_data_token
+ (str
)
+ –
+ Foreign key pointing to the sample data, which must be a keyframe image.
+category_token
+ (str
)
+ –
+ Foreign key pointing to the surface category.
+mask
+ (RLEMask
)
+ –
+ Segmentation mask using the COCO format compressed by RLE.
+
ShiftState
+
+
+
+ Bases: str
, Enum
An enum to represent gear shift state.
+ + + + +
IndicatorState
+
+
+
+ Bases: str
, Enum
An enum to represent indicator state.
+ + + + +
Indicators
+
+
+A dataclass to represent state of each indicator.
+ + +Attributes:
+left
+ (IndicatorState
)
+ –
+ State of the left indicator.
+right
+ (IndicatorState
)
+ –
+ State of the right indicator.
+hazard
+ (IndicatorState
)
+ –
+ State of the hazard lights.
+
AdditionalInfo
+
+
+A dataclass to represent additional state information of the ego vehicle.
+ + +Attributes:
+speed
+ (float | None
)
+ –
+ Speed of the ego vehicle.
+
VehicleState
+
+
+
+ Bases: SchemaBase
A dataclass to represent schema table of vehicle_state.json
.
Attributes:
+token
+ (str
)
+ –
+ Unique record identifier.
+timestamp
+ (int
)
+ –
+ Unix time stamp.
+accel_pedal
+ (float | None
)
+ –
+ Accel pedal position [%].
+brake_pedal
+ (float | None
)
+ –
+ Brake pedal position [%].
+steer_pedal
+ (float | None
)
+ –
+ Steering wheel position [%].
+steering_tire_angle
+ (float | None
)
+ –
+ Steering tire angle [rad].
+steering_wheel_angle
+ (float | None
)
+ –
+ Steering wheel angle [rad].
+shift_state
+ (ShiftState | None
)
+ –
+ Gear shift state.
+indicators
+ (Indicators | None
)
+ –
+ State of each indicator.
+additional_info
+ (AdditionalInfo | None
)
+ –
+ Additional state information.
+
Visibility
+
+
+
+ Bases: SchemaBase
A dataclass to represent schema table of visibility.json
.
Attributes:
+token
+ (str
)
+ –
+ Unique record identifier.
+level
+ (VisibilityLevel
)
+ –
+ Visibility level.
+description
+ (str
)
+ –
+ Description of visibility level.
+
FileFormat
+
+
+
+ Bases: str
, Enum
An enum to represent file formats.
+ + +Attributes:
+JPG
+ –
+ JPG format for image data.
+PNG
+ –
+ PNG format for image data.
+PCD
+ –
+ PCD format for pointcloud data.
+BIN
+ –
+ BIN format.
+PCDBIN
+ –
+ PCD.BIN format for pointcloud data.
+
is_member
+
+
+
+ staticmethod
+
+
+Indicate whether the input item is one of the members of FileFormat.
+ + +Parameters:
+item
+ (str
)
+ –
+ Any file format name.
+Returns:
+bool
+ –
+ Return True if the item is included.
+
values
+
+
+
+ staticmethod
+
+
+Return a list of values of members.
+ + +Returns:
+list[str]
+ –
+ List of values.
+
SensorModality
+
+
+
+ Bases: str
, Enum
An enum to represent sensor modalities.
+ + +Attributes:
+LIDAR
+ –
+ Lidar sensor.
+CAMERA
+ –
+ Camera sensor.
+RADAR
+ –
+ Radar sensor.
+
VisibilityLevel
+
+
+
+ Bases: str
, Enum
An enum to represent visibility levels.
+ + +Attributes:
+FULL
+ –
+ No occlusion for the object.
+MOST
+ –
+ Object is occluded, but by less than 50%.
+PARTIAL
+ –
+ Object is occluded, but by more than 50%.
+NONE
+ –
+ Object is 90-100% occluded and no points/pixels are visible in the label.
+UNAVAILABLE
+ –
+ Visibility level is not specified.
+
RLEMask
+
+
+A dataclass to represent segmentation mask compressed by RLE.
+ + +Attributes:
+size
+ (list[int, int]
)
+ –
+ Size of image ordering (width, height).
+counts
+ (str
)
+ –
+ RLE compressed mask data.
+
build_schema
+
+
+Build schema dataclass from json file path.
+ + +Parameters:
+name
+ (str | SchemaName
)
+ –
+ Name of schema table.
+filepath
+ (str
)
+ –
+ Path to json file.
+Returns:
+list[SchemaTable]
+ –
+ List of schema dataclasses.
+
SchemaRegistry
+
+
+A manager class to register schema tables.
+ + + + +
build_from_json
+
+
+Build schema dataclass from json.
+ + +Parameters:
+key
+ (str
)
+ –
+ Name of schema field.
+filepath
+ (str
)
+ –
+ Path to schema json file.
+Returns:
+list[SchemaTable]
+ –
+ Instantiated dataclass.
+
serialize_schemas
+
+
+Serialize a list of schema dataclasses into list of dict.
+ + +Parameters:
+data
+ (list[SchemaTable]
)
+ –
+ List of schema dataclasses.
+Returns:
+list[dict]
+ –
+ Serialized list of dict data.
+
Tier4
+
+
+Database class for T4 dataset to help query and retrieve information from the database.
+Load the database and create reverse indexes and shortcuts.
+ + +Parameters:
+version
+ (str
)
+ –
+ Directory name of database json files.
+data_root
+ (str
)
+ –
+ Path to the root directory of dataset.
+verbose
+ (bool
, default:
+ True
+)
+ –
+ Whether to display status during load.
+Examples:
+>>> from t4_devkit import Tier4
+>>> t4 = Tier4("annotation", "data/tier4")
+======
+Loading T4 tables in `annotation`...
+Reverse indexing...
+Done reverse indexing in 0.010 seconds.
+======
+21 category
+8 attribute
+4 visibility
+31 instance
+7 sensor
+7 calibrated_sensor
+2529 ego_pose
+1 log
+1 scene
+88 sample
+2529 sample_data
+1919 sample_annotation
+0 object_ann
+0 surface_ann
+0 keypoint
+1 map
+Done loading in 0.046 seconds.
+======
+
get_table
+
+
+Return the list of dataclasses corresponding to the schema table.
+ + +Parameters:
+schema
+ (str | SchemaName
)
+ –
+ Name of schema table.
+Returns:
+list[SchemaTable]
+ –
+ List of dataclasses.
+
get
+
+
+Return a record identified by the associated token.
+ + +Parameters:
+schema
+ (str | SchemaName
)
+ –
+ Name of schema.
+token
+ (str
)
+ –
+ Token to identify the specific record.
+Returns:
+SchemaTable
+ –
+ Table record of the corresponding token.
+
get_idx
+
+
+Return the index of the record in a table in constant runtime.
+ + +Parameters:
+schema
+ (str | SchemaName
)
+ –
+ Name of schema.
+token
+ (str
)
+ –
+ Token of record.
+Returns:
+int
+ –
+ The index of the record in table.
+
get_sample_data_path
+
+
+Return the file path to a raw data recorded in sample_data
.
Parameters:
+sample_data_token
+ (str
)
+ –
+ Token of sample_data
.
Returns:
+str
+ –
+ File path.
+
get_sample_data
+
+
+get_sample_data(sample_data_token: str, selected_ann_tokens: list[str] | None = None, *, as_3d: bool = True, visibility: VisibilityLevel = VisibilityLevel.NONE) -> tuple[str, list[BoxType], CamIntrinsicType | None]
+
Return the data path as well as all annotations related to that sample_data
.
Parameters:
+sample_data_token
+ (str
)
+ –
+ Token of sample_data
.
selected_ann_tokens
+ (list[str] | None
, default:
+ None
+)
+ –
+ Specify if you want to extract only particular annotations.
+as_3d
+ (bool
, default:
+ True
+)
+ –
+ Whether to return 3D or 2D boxes.
+visibility
+ (VisibilityLevel
, default:
+ NONE
+)
+ –
+ If sample_data
is an image,
+this sets required visibility for only 3D boxes.
Returns:
+tuple[str, list[BoxType], CamIntrinsicType | None]
+ –
+ Data path, a list of boxes and 3x3 camera intrinsic matrix.
+
get_semantic_label
+
+
+get_semantic_label(category_token: str, attribute_tokens: list[str] | None = None) -> SemanticLabel
+
Return a SemanticLabel instance from specified category_token
and attribute_tokens
.
Parameters:
+category_token
+ (str
)
+ –
+ Token of Category
table.
attribute_tokens
+ (list[str] | None
, default:
+ None
+)
+ –
+ List of attribute tokens.
+Returns:
+SemanticLabel
+ –
+ Instantiated SemanticLabel.
+
get_box3d
+
+
+Return a Box3D class from a sample_annotation
record.
Parameters:
+sample_annotation_token
+ (str
)
+ –
+ Token of sample_annotation
.
Returns:
+Box3D
+ –
+ Instantiated Box3D.
+
get_box2d
+
+
+Return a Box2D class from a object_ann
record.
Parameters:
+object_ann_token
+ (str
)
+ –
+ Token of object_ann
.
Returns:
+Box2D
+ –
+ Instantiated Box2D.
+
get_box3ds
+
+
+Return a list of Box3D classes for all annotations of a particular sample_data
record.
+If the sample_data
is a keyframe, this returns annotations for the corresponding sample
.
Parameters:
+sample_data_token
+ (str
)
+ –
+ Token of sample_data
.
Returns:
+list[Box3D]
+ –
+ List of instantiated Box3D classes.
+
get_box2ds
+
+
+Return a list of Box2D classes for all annotations of a particular sample_data
record.
+If the sample_data
is a keyframe, this returns annotations for the corresponding sample
.
Parameters:
+sample_data_token
+ (str
)
+ –
+ Token of sample_data
.
Returns:
+list[Box2D]
+ –
+ List of instantiated Box2D classes.
+
box_velocity
+
+
+Return the velocity of an annotation. +If the corresponding annotation has a true velocity, this returns it. +Otherwise, this estimates the velocity by computing the difference +between the previous and next frame. +If it fails to estimate the velocity, values are set to np.nan.
+ + +Parameters:
+sample_annotation_token
+ (str
)
+ –
+ Token of sample_annotation
.
max_time_diff
+ (float
, default:
+ 1.5
+)
+ –
+ Max allowed time difference +between consecutive samples.
+Returns:
+VelocityType
( VelocityType
+) –
+ Velocity in the order of (vx, vy, vz) in m/s.
+Currently, velocity coordinates are with respect to the map, but +they should be with respect to each box.
+
project_pointcloud
+
+
+project_pointcloud(point_sample_data_token: str, camera_sample_data_token: str, min_dist: float = 1.0, *, ignore_distortion: bool = False) -> tuple[NDArrayF64, NDArrayF64, NDArrayU8]
+
Project pointcloud on image plane.
+ + +Parameters:
+point_sample_data_token
+ (str
)
+ –
+ Sample data token of lidar or radar sensor.
+camera_sample_data_token
+ (str
)
+ –
+ Sample data token of camera.
+min_dist
+ (float
, default:
+ 1.0
+)
+ –
+ Distance from the camera below which points are discarded.
+ignore_distortion
+ (bool
, default:
+ False
+)
+ –
+ Whether to ignore distortion parameters.
+Returns:
+tuple[NDArrayF64, NDArrayF64, NDArrayU8]
+ –
+ Projected points [2, n], their normalized depths [n] and an image.
+
render_scene
+
+
+render_scene(scene_token: str, *, max_time_seconds: float = np.inf, save_dir: str | None = None, show: bool = True) -> None
+
Render specified scene.
+ + +Parameters:
+scene_token
+ (str
)
+ –
+ Unique identifier of scene.
+max_time_seconds
+ (float
, default:
+ inf
+)
+ –
+ Max time length to be rendered [s].
+save_dir
+ (str | None
, default:
+ None
+)
+ –
+ Directory path to save the recording.
+show
+ (bool
, default:
+ True
+)
+ –
+ Whether to spawn rendering viewer.
+
render_instance
+
+
+Render particular instance.
+ + +Parameters:
+instance_token
+ (str
)
+ –
+ Instance token.
+save_dir
+ (str | None
, default:
+ None
+)
+ –
+ Directory path to save the recording.
+show
+ (bool
, default:
+ True
+)
+ –
+ Whether to spawn rendering viewer.
+
render_pointcloud
+
+
+render_pointcloud(scene_token: str, *, max_time_seconds: float = np.inf, ignore_distortion: bool = True, save_dir: str | None = None, show: bool = True) -> None
+
Render pointcloud on 3D and 2D view.
+ + +Parameters:
+scene_token
+ (str
)
+ –
+ Scene token.
+max_time_seconds
+ (float
, default:
+ inf
+)
+ –
+ Max time length to be rendered [s].
+save_dir
+ (str | None
, default:
+ None
+)
+ –
+ Directory path to save the recording.
+ignore_distortion
+ (bool
, default:
+ True
+)
+ –
+ Whether to ignore distortion parameters.
+show
+ (bool
, default:
+ True
+)
+ –
+ Whether to spawn rendering viewer.
+Add an option of rendering radar channels.
+viewer
Tier4Viewer
+
+
+Tier4Viewer(app_id: str, cameras: Sequence[str] | None = None, *, without_3d: bool = False, spawn: bool = True)
+
A viewer class that renders some components powered by rerun.
+ +Construct a new object.
+ + +Parameters:
+app_id
+ (str
)
+ –
+ Application ID.
+cameras
+ (Sequence[str] | None
, default:
+ None
+)
+ –
+ Sequence of camera names.
+If None
, any 2D spaces will not be visualized.
without_3d
+ (bool
, default:
+ False
+)
+ –
+ Whether to render objects without the 3D space.
+spawn
+ (bool
, default:
+ True
+)
+ –
+ Whether to spawn the viewer.
+Examples:
+>>> from t4_devkit.viewer import Tier4Viewer
+# Rendering both 3D/2D spaces
+>>> viewer = Tier4Viewer("myapp", cameras=["camera0", "camera1"])
+# Rendering 3D space only
+>>> viewer = Tier4Viewer("myapp")
+# Rendering 2D space only
+>>> viewer = Tier4Viewer("myapp", cameras=["camera0", "camera1"], without_3d=True)
+
with_labels
+
+
+Return myself after creating rr.AnnotationContext
on the recording.
Parameters:
+label2id
+ (dict[str, int]
)
+ –
+ Key-value mapping which maps label name to its ID.
+Returns:
+Self
+ –
+ Self instance.
+Examples:
+ + +
save
+
+
+Save recording result as save_dir/{app_id}.rrd
.
Parameters:
+save_dir
+ (str
)
+ –
+ Directory path to save the result.
+
render_box3ds
+
+
+Render 3D boxes.
+ + +Parameters:
+seconds
+ (float
)
+ –
+ Timestamp in [sec].
+boxes
+ (Sequence[Box3D]
)
+ –
+ Sequence of Box3D
s.
render_box2ds
+
+
+Render 2D boxes.
+ + +Parameters:
+seconds
+ (float
)
+ –
+ Timestamp in [sec].
+boxes
+ (Sequence[Box2D]
)
+ –
+ Sequence of Box2D
s.
render_segmentation2d
+
+
+render_segmentation2d(seconds: float, camera: str, masks: Sequence[NDArrayU8], class_ids: Sequence[int], uuids: Sequence[str | None] | None = None) -> None
+
Render 2D segmentation image.
+ + +Parameters:
+seconds
+ (float
)
+ –
+ Timestamp in [sec].
+camera
+ (str
)
+ –
+ Name of camera channel.
+masks
+ (Sequence[NDArrayU8]
)
+ –
+ Sequence of segmentation mask of each instance, +each mask is the shape of (W, H).
+class_ids
+ (Sequence[int]
)
+ –
+ Sequence of label ids.
+uuids
+ (Sequence[str | None] | None
, default:
+ None
+)
+ –
+ Sequence of each instance ID.
+
render_pointcloud
+
+
+Render pointcloud.
+ + +Parameters:
+seconds
+ (float
)
+ –
+ Timestamp in [sec].
+channel
+ (str
)
+ –
+ Name of the pointcloud sensor channel.
+pointcloud
+ (PointCloudLike
)
+ –
+ Inherence object of PointCloud
.
render_image
+
+
+Render an image.
+ + +Parameters:
+seconds
+ (float
)
+ –
+ Timestamp in [sec].
+camera
+ (str
)
+ –
+ Name of the camera channel.
+image
+ (str | NDArrayU8
)
+ –
+ Image tensor or path of the image file.
+
distance_color
+
+
+distance_color(distances: Number | ArrayLike, cmap: str | None = None, v_min: float = 3.0, v_max: float = 75.0) -> tuple[float, float, float] | NDArrayF64
+
Return color map depending on distance values.
+ + +Parameters:
+distances
+ (Number | ArrayLike
)
+ –
+ Array of distances in the shape of (N,).
+cmap
+ (str | None
, default:
+ None
+)
+ –
+ Color map name in matplotlib. If None, turbo_r
will be used.
v_min
+ (float
, default:
+ 3.0
+)
+ –
+ Min value to normalize.
+v_max
+ (float
, default:
+ 75.0
+)
+ –
+ Max value to normalize.
+Returns:
+tuple[float, float, float] | NDArrayF64
+ –
+ Color map in the shape of (N,). If input type is any number, returns a color as
+tuple[float, float, float]
. Otherwise, returns colors as NDArrayF64
.
format_entity
+
+
+Format entity path.
+ + +Parameters:
+root
+ (str
)
+ –
+ Root entity path.
+*entities
+ –
+ Entity path(s).
+Returns:
+str
+ –
+ Formatted entity path.
+Examples:
+ + +